##// END OF EJS Templates
merge crew and main
Nicolas Dumazet -
r11711:a2d45964 merge default
parent child Browse files
Show More
@@ -1,531 +1,534 b''
1 # Mercurial extension to provide the 'hg bookmark' command
1 # Mercurial extension to provide the 'hg bookmark' command
2 #
2 #
3 # Copyright 2008 David Soria Parra <dsp@php.net>
3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''track a line of development with movable markers
8 '''track a line of development with movable markers
9
9
10 Bookmarks are local movable markers to changesets. Every bookmark
10 Bookmarks are local movable markers to changesets. Every bookmark
11 points to a changeset identified by its hash. If you commit a
11 points to a changeset identified by its hash. If you commit a
12 changeset that is based on a changeset that has a bookmark on it, the
12 changeset that is based on a changeset that has a bookmark on it, the
13 bookmark shifts to the new changeset.
13 bookmark shifts to the new changeset.
14
14
15 It is possible to use bookmark names in every revision lookup (e.g.
15 It is possible to use bookmark names in every revision lookup (e.g.
16 :hg:`merge`, :hg:`update`).
16 :hg:`merge`, :hg:`update`).
17
17
18 By default, when several bookmarks point to the same changeset, they
18 By default, when several bookmarks point to the same changeset, they
19 will all move forward together. It is possible to obtain a more
19 will all move forward together. It is possible to obtain a more
20 git-like experience by adding the following configuration option to
20 git-like experience by adding the following configuration option to
21 your .hgrc::
21 your .hgrc::
22
22
23 [bookmarks]
23 [bookmarks]
24 track.current = True
24 track.current = True
25
25
26 This will cause Mercurial to track the bookmark that you are currently
26 This will cause Mercurial to track the bookmark that you are currently
27 using, and only update it. This is similar to git's approach to
27 using, and only update it. This is similar to git's approach to
28 branching.
28 branching.
29 '''
29 '''
30
30
31 from mercurial.i18n import _
31 from mercurial.i18n import _
32 from mercurial.node import nullid, nullrev, hex, short
32 from mercurial.node import nullid, nullrev, hex, short
33 from mercurial import util, commands, repair, extensions, pushkey, hg, url
33 from mercurial import util, commands, repair, extensions, pushkey, hg, url
34 import os
34 import os
35
35
36 def write(repo):
36 def write(repo):
37 '''Write bookmarks
37 '''Write bookmarks
38
38
39 Write the given bookmark => hash dictionary to the .hg/bookmarks file
39 Write the given bookmark => hash dictionary to the .hg/bookmarks file
40 in a format equal to those of localtags.
40 in a format equal to those of localtags.
41
41
42 We also store a backup of the previous state in undo.bookmarks that
42 We also store a backup of the previous state in undo.bookmarks that
43 can be copied back on rollback.
43 can be copied back on rollback.
44 '''
44 '''
45 refs = repo._bookmarks
45 refs = repo._bookmarks
46 if os.path.exists(repo.join('bookmarks')):
46 if os.path.exists(repo.join('bookmarks')):
47 util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks'))
47 util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks'))
48 if repo._bookmarkcurrent not in refs:
48 if repo._bookmarkcurrent not in refs:
49 setcurrent(repo, None)
49 setcurrent(repo, None)
50 wlock = repo.wlock()
50 wlock = repo.wlock()
51 try:
51 try:
52 file = repo.opener('bookmarks', 'w', atomictemp=True)
52 file = repo.opener('bookmarks', 'w', atomictemp=True)
53 for refspec, node in refs.iteritems():
53 for refspec, node in refs.iteritems():
54 file.write("%s %s\n" % (hex(node), refspec))
54 file.write("%s %s\n" % (hex(node), refspec))
55 file.rename()
55 file.rename()
56
56
57 # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
57 # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
58 try:
58 try:
59 os.utime(repo.sjoin('00changelog.i'), None)
59 os.utime(repo.sjoin('00changelog.i'), None)
60 except OSError:
60 except OSError:
61 pass
61 pass
62
62
63 finally:
63 finally:
64 wlock.release()
64 wlock.release()
65
65
66 def setcurrent(repo, mark):
66 def setcurrent(repo, mark):
67 '''Set the name of the bookmark that we are currently on
67 '''Set the name of the bookmark that we are currently on
68
68
69 Set the name of the bookmark that we are on (hg update <bookmark>).
69 Set the name of the bookmark that we are on (hg update <bookmark>).
70 The name is recorded in .hg/bookmarks.current
70 The name is recorded in .hg/bookmarks.current
71 '''
71 '''
72 current = repo._bookmarkcurrent
72 current = repo._bookmarkcurrent
73 if current == mark:
73 if current == mark:
74 return
74 return
75
75
76 refs = repo._bookmarks
76 refs = repo._bookmarks
77
77
78 # do not update if we do update to a rev equal to the current bookmark
78 # do not update if we do update to a rev equal to the current bookmark
79 if (mark and mark not in refs and
79 if (mark and mark not in refs and
80 current and refs[current] == repo.changectx('.').node()):
80 current and refs[current] == repo.changectx('.').node()):
81 return
81 return
82 if mark not in refs:
82 if mark not in refs:
83 mark = ''
83 mark = ''
84 wlock = repo.wlock()
84 wlock = repo.wlock()
85 try:
85 try:
86 file = repo.opener('bookmarks.current', 'w', atomictemp=True)
86 file = repo.opener('bookmarks.current', 'w', atomictemp=True)
87 file.write(mark)
87 file.write(mark)
88 file.rename()
88 file.rename()
89 finally:
89 finally:
90 wlock.release()
90 wlock.release()
91 repo._bookmarkcurrent = mark
91 repo._bookmarkcurrent = mark
92
92
93 def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
93 def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
94 '''track a line of development with movable markers
94 '''track a line of development with movable markers
95
95
96 Bookmarks are pointers to certain commits that move when
96 Bookmarks are pointers to certain commits that move when
97 committing. Bookmarks are local. They can be renamed, copied and
97 committing. Bookmarks are local. They can be renamed, copied and
98 deleted. It is possible to use bookmark names in :hg:`merge` and
98 deleted. It is possible to use bookmark names in :hg:`merge` and
99 :hg:`update` to merge and update respectively to a given bookmark.
99 :hg:`update` to merge and update respectively to a given bookmark.
100
100
101 You can use :hg:`bookmark NAME` to set a bookmark on the working
101 You can use :hg:`bookmark NAME` to set a bookmark on the working
102 directory's parent revision with the given name. If you specify
102 directory's parent revision with the given name. If you specify
103 a revision using -r REV (where REV may be an existing bookmark),
103 a revision using -r REV (where REV may be an existing bookmark),
104 the bookmark is assigned to that revision.
104 the bookmark is assigned to that revision.
105 '''
105 '''
106 hexfn = ui.debugflag and hex or short
106 hexfn = ui.debugflag and hex or short
107 marks = repo._bookmarks
107 marks = repo._bookmarks
108 cur = repo.changectx('.').node()
108 cur = repo.changectx('.').node()
109
109
110 if rename:
110 if rename:
111 if rename not in marks:
111 if rename not in marks:
112 raise util.Abort(_("a bookmark of this name does not exist"))
112 raise util.Abort(_("a bookmark of this name does not exist"))
113 if mark in marks and not force:
113 if mark in marks and not force:
114 raise util.Abort(_("a bookmark of the same name already exists"))
114 raise util.Abort(_("a bookmark of the same name already exists"))
115 if mark is None:
115 if mark is None:
116 raise util.Abort(_("new bookmark name required"))
116 raise util.Abort(_("new bookmark name required"))
117 marks[mark] = marks[rename]
117 marks[mark] = marks[rename]
118 del marks[rename]
118 del marks[rename]
119 if repo._bookmarkcurrent == rename:
119 if repo._bookmarkcurrent == rename:
120 setcurrent(repo, mark)
120 setcurrent(repo, mark)
121 write(repo)
121 write(repo)
122 return
122 return
123
123
124 if delete:
124 if delete:
125 if mark is None:
125 if mark is None:
126 raise util.Abort(_("bookmark name required"))
126 raise util.Abort(_("bookmark name required"))
127 if mark not in marks:
127 if mark not in marks:
128 raise util.Abort(_("a bookmark of this name does not exist"))
128 raise util.Abort(_("a bookmark of this name does not exist"))
129 if mark == repo._bookmarkcurrent:
129 if mark == repo._bookmarkcurrent:
130 setcurrent(repo, None)
130 setcurrent(repo, None)
131 del marks[mark]
131 del marks[mark]
132 write(repo)
132 write(repo)
133 return
133 return
134
134
135 if mark != None:
135 if mark != None:
136 if "\n" in mark:
136 if "\n" in mark:
137 raise util.Abort(_("bookmark name cannot contain newlines"))
137 raise util.Abort(_("bookmark name cannot contain newlines"))
138 mark = mark.strip()
138 mark = mark.strip()
139 if not mark:
140 raise util.Abort(_("bookmark names cannot consist entirely of "
141 "whitespace"))
139 if mark in marks and not force:
142 if mark in marks and not force:
140 raise util.Abort(_("a bookmark of the same name already exists"))
143 raise util.Abort(_("a bookmark of the same name already exists"))
141 if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
144 if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
142 and not force):
145 and not force):
143 raise util.Abort(
146 raise util.Abort(
144 _("a bookmark cannot have the name of an existing branch"))
147 _("a bookmark cannot have the name of an existing branch"))
145 if rev:
148 if rev:
146 marks[mark] = repo.lookup(rev)
149 marks[mark] = repo.lookup(rev)
147 else:
150 else:
148 marks[mark] = repo.changectx('.').node()
151 marks[mark] = repo.changectx('.').node()
149 setcurrent(repo, mark)
152 setcurrent(repo, mark)
150 write(repo)
153 write(repo)
151 return
154 return
152
155
153 if mark is None:
156 if mark is None:
154 if rev:
157 if rev:
155 raise util.Abort(_("bookmark name required"))
158 raise util.Abort(_("bookmark name required"))
156 if len(marks) == 0:
159 if len(marks) == 0:
157 ui.status(_("no bookmarks set\n"))
160 ui.status(_("no bookmarks set\n"))
158 else:
161 else:
159 for bmark, n in marks.iteritems():
162 for bmark, n in marks.iteritems():
160 if ui.configbool('bookmarks', 'track.current'):
163 if ui.configbool('bookmarks', 'track.current'):
161 current = repo._bookmarkcurrent
164 current = repo._bookmarkcurrent
162 if bmark == current and n == cur:
165 if bmark == current and n == cur:
163 prefix, label = '*', 'bookmarks.current'
166 prefix, label = '*', 'bookmarks.current'
164 else:
167 else:
165 prefix, label = ' ', ''
168 prefix, label = ' ', ''
166 else:
169 else:
167 if n == cur:
170 if n == cur:
168 prefix, label = '*', 'bookmarks.current'
171 prefix, label = '*', 'bookmarks.current'
169 else:
172 else:
170 prefix, label = ' ', ''
173 prefix, label = ' ', ''
171
174
172 if ui.quiet:
175 if ui.quiet:
173 ui.write("%s\n" % bmark, label=label)
176 ui.write("%s\n" % bmark, label=label)
174 else:
177 else:
175 ui.write(" %s %-25s %d:%s\n" % (
178 ui.write(" %s %-25s %d:%s\n" % (
176 prefix, bmark, repo.changelog.rev(n), hexfn(n)),
179 prefix, bmark, repo.changelog.rev(n), hexfn(n)),
177 label=label)
180 label=label)
178 return
181 return
179
182
180 def _revstostrip(changelog, node):
183 def _revstostrip(changelog, node):
181 srev = changelog.rev(node)
184 srev = changelog.rev(node)
182 tostrip = [srev]
185 tostrip = [srev]
183 saveheads = []
186 saveheads = []
184 for r in xrange(srev, len(changelog)):
187 for r in xrange(srev, len(changelog)):
185 parents = changelog.parentrevs(r)
188 parents = changelog.parentrevs(r)
186 if parents[0] in tostrip or parents[1] in tostrip:
189 if parents[0] in tostrip or parents[1] in tostrip:
187 tostrip.append(r)
190 tostrip.append(r)
188 if parents[1] != nullrev:
191 if parents[1] != nullrev:
189 for p in parents:
192 for p in parents:
190 if p not in tostrip and p > srev:
193 if p not in tostrip and p > srev:
191 saveheads.append(p)
194 saveheads.append(p)
192 return [r for r in tostrip if r not in saveheads]
195 return [r for r in tostrip if r not in saveheads]
193
196
194 def strip(oldstrip, ui, repo, node, backup="all"):
197 def strip(oldstrip, ui, repo, node, backup="all"):
195 """Strip bookmarks if revisions are stripped using
198 """Strip bookmarks if revisions are stripped using
196 the mercurial.strip method. This usually happens during
199 the mercurial.strip method. This usually happens during
197 qpush and qpop"""
200 qpush and qpop"""
198 revisions = _revstostrip(repo.changelog, node)
201 revisions = _revstostrip(repo.changelog, node)
199 marks = repo._bookmarks
202 marks = repo._bookmarks
200 update = []
203 update = []
201 for mark, n in marks.iteritems():
204 for mark, n in marks.iteritems():
202 if repo.changelog.rev(n) in revisions:
205 if repo.changelog.rev(n) in revisions:
203 update.append(mark)
206 update.append(mark)
204 oldstrip(ui, repo, node, backup)
207 oldstrip(ui, repo, node, backup)
205 if len(update) > 0:
208 if len(update) > 0:
206 for m in update:
209 for m in update:
207 marks[m] = repo.changectx('.').node()
210 marks[m] = repo.changectx('.').node()
208 write(repo)
211 write(repo)
209
212
210 def reposetup(ui, repo):
213 def reposetup(ui, repo):
211 if not repo.local():
214 if not repo.local():
212 return
215 return
213
216
214 class bookmark_repo(repo.__class__):
217 class bookmark_repo(repo.__class__):
215
218
216 @util.propertycache
219 @util.propertycache
217 def _bookmarks(self):
220 def _bookmarks(self):
218 '''Parse .hg/bookmarks file and return a dictionary
221 '''Parse .hg/bookmarks file and return a dictionary
219
222
220 Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
223 Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
221 in the .hg/bookmarks file.
224 in the .hg/bookmarks file.
222 Read the file and return a (name=>nodeid) dictionary
225 Read the file and return a (name=>nodeid) dictionary
223 '''
226 '''
224 try:
227 try:
225 bookmarks = {}
228 bookmarks = {}
226 for line in self.opener('bookmarks'):
229 for line in self.opener('bookmarks'):
227 sha, refspec = line.strip().split(' ', 1)
230 sha, refspec = line.strip().split(' ', 1)
228 bookmarks[refspec] = super(bookmark_repo, self).lookup(sha)
231 bookmarks[refspec] = super(bookmark_repo, self).lookup(sha)
229 except:
232 except:
230 pass
233 pass
231 return bookmarks
234 return bookmarks
232
235
233 @util.propertycache
236 @util.propertycache
234 def _bookmarkcurrent(self):
237 def _bookmarkcurrent(self):
235 '''Get the current bookmark
238 '''Get the current bookmark
236
239
237 If we use gittishsh branches we have a current bookmark that
240 If we use gittishsh branches we have a current bookmark that
238 we are on. This function returns the name of the bookmark. It
241 we are on. This function returns the name of the bookmark. It
239 is stored in .hg/bookmarks.current
242 is stored in .hg/bookmarks.current
240 '''
243 '''
241 mark = None
244 mark = None
242 if os.path.exists(self.join('bookmarks.current')):
245 if os.path.exists(self.join('bookmarks.current')):
243 file = self.opener('bookmarks.current')
246 file = self.opener('bookmarks.current')
244 # No readline() in posixfile_nt, reading everything is cheap
247 # No readline() in posixfile_nt, reading everything is cheap
245 mark = (file.readlines() or [''])[0]
248 mark = (file.readlines() or [''])[0]
246 if mark == '':
249 if mark == '':
247 mark = None
250 mark = None
248 file.close()
251 file.close()
249 return mark
252 return mark
250
253
251 def rollback(self, *args):
254 def rollback(self, *args):
252 if os.path.exists(self.join('undo.bookmarks')):
255 if os.path.exists(self.join('undo.bookmarks')):
253 util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
256 util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
254 return super(bookmark_repo, self).rollback(*args)
257 return super(bookmark_repo, self).rollback(*args)
255
258
256 def lookup(self, key):
259 def lookup(self, key):
257 if key in self._bookmarks:
260 if key in self._bookmarks:
258 key = self._bookmarks[key]
261 key = self._bookmarks[key]
259 return super(bookmark_repo, self).lookup(key)
262 return super(bookmark_repo, self).lookup(key)
260
263
261 def _bookmarksupdate(self, parents, node):
264 def _bookmarksupdate(self, parents, node):
262 marks = self._bookmarks
265 marks = self._bookmarks
263 update = False
266 update = False
264 if ui.configbool('bookmarks', 'track.current'):
267 if ui.configbool('bookmarks', 'track.current'):
265 mark = self._bookmarkcurrent
268 mark = self._bookmarkcurrent
266 if mark and marks[mark] in parents:
269 if mark and marks[mark] in parents:
267 marks[mark] = node
270 marks[mark] = node
268 update = True
271 update = True
269 else:
272 else:
270 for mark, n in marks.items():
273 for mark, n in marks.items():
271 if n in parents:
274 if n in parents:
272 marks[mark] = node
275 marks[mark] = node
273 update = True
276 update = True
274 if update:
277 if update:
275 write(self)
278 write(self)
276
279
277 def commitctx(self, ctx, error=False):
280 def commitctx(self, ctx, error=False):
278 """Add a revision to the repository and
281 """Add a revision to the repository and
279 move the bookmark"""
282 move the bookmark"""
280 wlock = self.wlock() # do both commit and bookmark with lock held
283 wlock = self.wlock() # do both commit and bookmark with lock held
281 try:
284 try:
282 node = super(bookmark_repo, self).commitctx(ctx, error)
285 node = super(bookmark_repo, self).commitctx(ctx, error)
283 if node is None:
286 if node is None:
284 return None
287 return None
285 parents = self.changelog.parents(node)
288 parents = self.changelog.parents(node)
286 if parents[1] == nullid:
289 if parents[1] == nullid:
287 parents = (parents[0],)
290 parents = (parents[0],)
288
291
289 self._bookmarksupdate(parents, node)
292 self._bookmarksupdate(parents, node)
290 return node
293 return node
291 finally:
294 finally:
292 wlock.release()
295 wlock.release()
293
296
294 def pull(self, remote, heads=None, force=False):
297 def pull(self, remote, heads=None, force=False):
295 result = super(bookmark_repo, self).pull(remote, heads, force)
298 result = super(bookmark_repo, self).pull(remote, heads, force)
296
299
297 self.ui.debug("checking for updated bookmarks\n")
300 self.ui.debug("checking for updated bookmarks\n")
298 rb = remote.listkeys('bookmarks')
301 rb = remote.listkeys('bookmarks')
299 changes = 0
302 changes = 0
300 for k in rb.keys():
303 for k in rb.keys():
301 if k in self._bookmarks:
304 if k in self._bookmarks:
302 nr, nl = rb[k], self._bookmarks[k]
305 nr, nl = rb[k], self._bookmarks[k]
303 if nr in self:
306 if nr in self:
304 cr = self[nr]
307 cr = self[nr]
305 cl = self[nl]
308 cl = self[nl]
306 if cl.rev() >= cr.rev():
309 if cl.rev() >= cr.rev():
307 continue
310 continue
308 if cr in cl.descendants():
311 if cr in cl.descendants():
309 self._bookmarks[k] = cr.node()
312 self._bookmarks[k] = cr.node()
310 changes += 1
313 changes += 1
311 self.ui.status(_("updating bookmark %s\n") % k)
314 self.ui.status(_("updating bookmark %s\n") % k)
312 else:
315 else:
313 self.ui.warn(_("not updating divergent"
316 self.ui.warn(_("not updating divergent"
314 " bookmark %s\n") % k)
317 " bookmark %s\n") % k)
315 if changes:
318 if changes:
316 write(repo)
319 write(repo)
317
320
318 return result
321 return result
319
322
320 def push(self, remote, force=False, revs=None, newbranch=False):
323 def push(self, remote, force=False, revs=None, newbranch=False):
321 result = super(bookmark_repo, self).push(remote, force, revs,
324 result = super(bookmark_repo, self).push(remote, force, revs,
322 newbranch)
325 newbranch)
323
326
324 self.ui.debug("checking for updated bookmarks\n")
327 self.ui.debug("checking for updated bookmarks\n")
325 rb = remote.listkeys('bookmarks')
328 rb = remote.listkeys('bookmarks')
326 for k in rb.keys():
329 for k in rb.keys():
327 if k in self._bookmarks:
330 if k in self._bookmarks:
328 nr, nl = rb[k], self._bookmarks[k]
331 nr, nl = rb[k], self._bookmarks[k]
329 if nr in self:
332 if nr in self:
330 cr = self[nr]
333 cr = self[nr]
331 cl = self[nl]
334 cl = self[nl]
332 if cl in cr.descendants():
335 if cl in cr.descendants():
333 r = remote.pushkey('bookmarks', k, nr, nl)
336 r = remote.pushkey('bookmarks', k, nr, nl)
334 if r:
337 if r:
335 self.ui.status(_("updating bookmark %s\n") % k)
338 self.ui.status(_("updating bookmark %s\n") % k)
336 else:
339 else:
337 self.ui.warn(_('updating bookmark %s'
340 self.ui.warn(_('updating bookmark %s'
338 ' failed!\n') % k)
341 ' failed!\n') % k)
339
342
340 return result
343 return result
341
344
342 def addchangegroup(self, *args, **kwargs):
345 def addchangegroup(self, *args, **kwargs):
343 parents = self.dirstate.parents()
346 parents = self.dirstate.parents()
344
347
345 result = super(bookmark_repo, self).addchangegroup(*args, **kwargs)
348 result = super(bookmark_repo, self).addchangegroup(*args, **kwargs)
346 if result > 1:
349 if result > 1:
347 # We have more heads than before
350 # We have more heads than before
348 return result
351 return result
349 node = self.changelog.tip()
352 node = self.changelog.tip()
350
353
351 self._bookmarksupdate(parents, node)
354 self._bookmarksupdate(parents, node)
352 return result
355 return result
353
356
354 def _findtags(self):
357 def _findtags(self):
355 """Merge bookmarks with normal tags"""
358 """Merge bookmarks with normal tags"""
356 (tags, tagtypes) = super(bookmark_repo, self)._findtags()
359 (tags, tagtypes) = super(bookmark_repo, self)._findtags()
357 tags.update(self._bookmarks)
360 tags.update(self._bookmarks)
358 return (tags, tagtypes)
361 return (tags, tagtypes)
359
362
360 if hasattr(repo, 'invalidate'):
363 if hasattr(repo, 'invalidate'):
361 def invalidate(self):
364 def invalidate(self):
362 super(bookmark_repo, self).invalidate()
365 super(bookmark_repo, self).invalidate()
363 for attr in ('_bookmarks', '_bookmarkcurrent'):
366 for attr in ('_bookmarks', '_bookmarkcurrent'):
364 if attr in self.__dict__:
367 if attr in self.__dict__:
365 delattr(self, attr)
368 delattr(self, attr)
366
369
367 repo.__class__ = bookmark_repo
370 repo.__class__ = bookmark_repo
368
371
369 def listbookmarks(repo):
372 def listbookmarks(repo):
370 d = {}
373 d = {}
371 for k, v in repo._bookmarks.iteritems():
374 for k, v in repo._bookmarks.iteritems():
372 d[k] = hex(v)
375 d[k] = hex(v)
373 return d
376 return d
374
377
375 def pushbookmark(repo, key, old, new):
378 def pushbookmark(repo, key, old, new):
376 w = repo.wlock()
379 w = repo.wlock()
377 try:
380 try:
378 marks = repo._bookmarks
381 marks = repo._bookmarks
379 if hex(marks.get(key, '')) != old:
382 if hex(marks.get(key, '')) != old:
380 return False
383 return False
381 if new == '':
384 if new == '':
382 del marks[key]
385 del marks[key]
383 else:
386 else:
384 if new not in repo:
387 if new not in repo:
385 return False
388 return False
386 marks[key] = repo[new].node()
389 marks[key] = repo[new].node()
387 write(repo)
390 write(repo)
388 return True
391 return True
389 finally:
392 finally:
390 w.release()
393 w.release()
391
394
392 def pull(oldpull, ui, repo, source="default", **opts):
395 def pull(oldpull, ui, repo, source="default", **opts):
393 # translate bookmark args to rev args for actual pull
396 # translate bookmark args to rev args for actual pull
394 if opts.get('bookmark'):
397 if opts.get('bookmark'):
395 # this is an unpleasant hack as pull will do this internally
398 # this is an unpleasant hack as pull will do this internally
396 source, branches = hg.parseurl(ui.expandpath(source),
399 source, branches = hg.parseurl(ui.expandpath(source),
397 opts.get('branch'))
400 opts.get('branch'))
398 other = hg.repository(hg.remoteui(repo, opts), source)
401 other = hg.repository(hg.remoteui(repo, opts), source)
399 rb = other.listkeys('bookmarks')
402 rb = other.listkeys('bookmarks')
400
403
401 for b in opts['bookmark']:
404 for b in opts['bookmark']:
402 if b not in rb:
405 if b not in rb:
403 raise util.Abort(_('remote bookmark %s not found!') % b)
406 raise util.Abort(_('remote bookmark %s not found!') % b)
404 opts.setdefault('rev', []).append(b)
407 opts.setdefault('rev', []).append(b)
405
408
406 result = oldpull(ui, repo, source, **opts)
409 result = oldpull(ui, repo, source, **opts)
407
410
408 # update specified bookmarks
411 # update specified bookmarks
409 if opts.get('bookmark'):
412 if opts.get('bookmark'):
410 for b in opts['bookmark']:
413 for b in opts['bookmark']:
411 # explicit pull overrides local bookmark if any
414 # explicit pull overrides local bookmark if any
412 ui.status(_("importing bookmark %s\n") % b)
415 ui.status(_("importing bookmark %s\n") % b)
413 repo._bookmarks[b] = repo[rb[b]].node()
416 repo._bookmarks[b] = repo[rb[b]].node()
414 write(repo)
417 write(repo)
415
418
416 return result
419 return result
417
420
418 def push(oldpush, ui, repo, dest=None, **opts):
421 def push(oldpush, ui, repo, dest=None, **opts):
419 dopush = True
422 dopush = True
420 if opts.get('bookmark'):
423 if opts.get('bookmark'):
421 dopush = False
424 dopush = False
422 for b in opts['bookmark']:
425 for b in opts['bookmark']:
423 if b in repo._bookmarks:
426 if b in repo._bookmarks:
424 dopush = True
427 dopush = True
425 opts.setdefault('rev', []).append(b)
428 opts.setdefault('rev', []).append(b)
426
429
427 result = 0
430 result = 0
428 if dopush:
431 if dopush:
429 result = oldpush(ui, repo, dest, **opts)
432 result = oldpush(ui, repo, dest, **opts)
430
433
431 if opts.get('bookmark'):
434 if opts.get('bookmark'):
432 # this is an unpleasant hack as push will do this internally
435 # this is an unpleasant hack as push will do this internally
433 dest = ui.expandpath(dest or 'default-push', dest or 'default')
436 dest = ui.expandpath(dest or 'default-push', dest or 'default')
434 dest, branches = hg.parseurl(dest, opts.get('branch'))
437 dest, branches = hg.parseurl(dest, opts.get('branch'))
435 other = hg.repository(hg.remoteui(repo, opts), dest)
438 other = hg.repository(hg.remoteui(repo, opts), dest)
436 rb = other.listkeys('bookmarks')
439 rb = other.listkeys('bookmarks')
437 for b in opts['bookmark']:
440 for b in opts['bookmark']:
438 # explicit push overrides remote bookmark if any
441 # explicit push overrides remote bookmark if any
439 if b in repo._bookmarks:
442 if b in repo._bookmarks:
440 ui.status(_("exporting bookmark %s\n") % b)
443 ui.status(_("exporting bookmark %s\n") % b)
441 new = repo[b].hex()
444 new = repo[b].hex()
442 else:
445 else:
443 ui.status(_("deleting remote bookmark %s\n") % b)
446 ui.status(_("deleting remote bookmark %s\n") % b)
444 new = '' # delete
447 new = '' # delete
445 old = rb.get(b, '')
448 old = rb.get(b, '')
446 r = other.pushkey('bookmarks', b, old, new)
449 r = other.pushkey('bookmarks', b, old, new)
447 if not r:
450 if not r:
448 ui.warn(_('updating bookmark %s failed!\n') % b)
451 ui.warn(_('updating bookmark %s failed!\n') % b)
449 if not result:
452 if not result:
450 result = 2
453 result = 2
451
454
452 return result
455 return result
453
456
454 def diffbookmarks(ui, repo, remote):
457 def diffbookmarks(ui, repo, remote):
455 ui.status(_("searching for changes\n"))
458 ui.status(_("searching for changes\n"))
456
459
457 lmarks = repo.listkeys('bookmarks')
460 lmarks = repo.listkeys('bookmarks')
458 rmarks = remote.listkeys('bookmarks')
461 rmarks = remote.listkeys('bookmarks')
459
462
460 diff = sorted(set(rmarks) - set(lmarks))
463 diff = sorted(set(rmarks) - set(lmarks))
461 for k in diff:
464 for k in diff:
462 ui.write(" %-25s %s\n" % (k, rmarks[k][:12]))
465 ui.write(" %-25s %s\n" % (k, rmarks[k][:12]))
463
466
464 if len(diff) <= 0:
467 if len(diff) <= 0:
465 ui.status(_("no changes found\n"))
468 ui.status(_("no changes found\n"))
466 return 1
469 return 1
467 return 0
470 return 0
468
471
469 def incoming(oldincoming, ui, repo, source="default", **opts):
472 def incoming(oldincoming, ui, repo, source="default", **opts):
470 if opts.get('bookmarks'):
473 if opts.get('bookmarks'):
471 source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
474 source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
472 other = hg.repository(hg.remoteui(repo, opts), source)
475 other = hg.repository(hg.remoteui(repo, opts), source)
473 ui.status(_('comparing with %s\n') % url.hidepassword(source))
476 ui.status(_('comparing with %s\n') % url.hidepassword(source))
474 return diffbookmarks(ui, repo, other)
477 return diffbookmarks(ui, repo, other)
475 else:
478 else:
476 return oldincoming(ui, repo, source, **opts)
479 return oldincoming(ui, repo, source, **opts)
477
480
478 def outgoing(oldoutgoing, ui, repo, dest=None, **opts):
481 def outgoing(oldoutgoing, ui, repo, dest=None, **opts):
479 if opts.get('bookmarks'):
482 if opts.get('bookmarks'):
480 dest = ui.expandpath(dest or 'default-push', dest or 'default')
483 dest = ui.expandpath(dest or 'default-push', dest or 'default')
481 dest, branches = hg.parseurl(dest, opts.get('branch'))
484 dest, branches = hg.parseurl(dest, opts.get('branch'))
482 other = hg.repository(hg.remoteui(repo, opts), dest)
485 other = hg.repository(hg.remoteui(repo, opts), dest)
483 ui.status(_('comparing with %s\n') % url.hidepassword(dest))
486 ui.status(_('comparing with %s\n') % url.hidepassword(dest))
484 return diffbookmarks(ui, other, repo)
487 return diffbookmarks(ui, other, repo)
485 else:
488 else:
486 return oldoutgoing(ui, repo, dest, **opts)
489 return oldoutgoing(ui, repo, dest, **opts)
487
490
488 def uisetup(ui):
491 def uisetup(ui):
489 extensions.wrapfunction(repair, "strip", strip)
492 extensions.wrapfunction(repair, "strip", strip)
490 if ui.configbool('bookmarks', 'track.current'):
493 if ui.configbool('bookmarks', 'track.current'):
491 extensions.wrapcommand(commands.table, 'update', updatecurbookmark)
494 extensions.wrapcommand(commands.table, 'update', updatecurbookmark)
492
495
493 entry = extensions.wrapcommand(commands.table, 'pull', pull)
496 entry = extensions.wrapcommand(commands.table, 'pull', pull)
494 entry[1].append(('B', 'bookmark', [],
497 entry[1].append(('B', 'bookmark', [],
495 _("bookmark to import")))
498 _("bookmark to import")))
496 entry = extensions.wrapcommand(commands.table, 'push', push)
499 entry = extensions.wrapcommand(commands.table, 'push', push)
497 entry[1].append(('B', 'bookmark', [],
500 entry[1].append(('B', 'bookmark', [],
498 _("bookmark to export")))
501 _("bookmark to export")))
499 entry = extensions.wrapcommand(commands.table, 'incoming', incoming)
502 entry = extensions.wrapcommand(commands.table, 'incoming', incoming)
500 entry[1].append(('B', 'bookmarks', False,
503 entry[1].append(('B', 'bookmarks', False,
501 _("compare bookmark")))
504 _("compare bookmark")))
502 entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
505 entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
503 entry[1].append(('B', 'bookmarks', False,
506 entry[1].append(('B', 'bookmarks', False,
504 _("compare bookmark")))
507 _("compare bookmark")))
505
508
506 pushkey.register('bookmarks', pushbookmark, listbookmarks)
509 pushkey.register('bookmarks', pushbookmark, listbookmarks)
507
510
508 def updatecurbookmark(orig, ui, repo, *args, **opts):
511 def updatecurbookmark(orig, ui, repo, *args, **opts):
509 '''Set the current bookmark
512 '''Set the current bookmark
510
513
511 If the user updates to a bookmark we update the .hg/bookmarks.current
514 If the user updates to a bookmark we update the .hg/bookmarks.current
512 file.
515 file.
513 '''
516 '''
514 res = orig(ui, repo, *args, **opts)
517 res = orig(ui, repo, *args, **opts)
515 rev = opts['rev']
518 rev = opts['rev']
516 if not rev and len(args) > 0:
519 if not rev and len(args) > 0:
517 rev = args[0]
520 rev = args[0]
518 setcurrent(repo, rev)
521 setcurrent(repo, rev)
519 return res
522 return res
520
523
521 cmdtable = {
524 cmdtable = {
522 "bookmarks":
525 "bookmarks":
523 (bookmark,
526 (bookmark,
524 [('f', 'force', False, _('force')),
527 [('f', 'force', False, _('force')),
525 ('r', 'rev', '', _('revision'), _('REV')),
528 ('r', 'rev', '', _('revision'), _('REV')),
526 ('d', 'delete', False, _('delete a given bookmark')),
529 ('d', 'delete', False, _('delete a given bookmark')),
527 ('m', 'rename', '', _('rename a given bookmark'), _('NAME'))],
530 ('m', 'rename', '', _('rename a given bookmark'), _('NAME'))],
528 _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
531 _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
529 }
532 }
530
533
531 colortable = {'bookmarks.current': 'green'}
534 colortable = {'bookmarks.current': 'green'}
@@ -1,3018 +1,3033 b''
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''manage a stack of patches
8 '''manage a stack of patches
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use :hg:`help command` for more details)::
17 Common tasks (use :hg:`help command` for more details)::
18
18
19 create new patch qnew
19 create new patch qnew
20 import existing patch qimport
20 import existing patch qimport
21
21
22 print patch series qseries
22 print patch series qseries
23 print applied patches qapplied
23 print applied patches qapplied
24
24
25 add known patch to applied stack qpush
25 add known patch to applied stack qpush
26 remove patch from applied stack qpop
26 remove patch from applied stack qpop
27 refresh contents of top applied patch qrefresh
27 refresh contents of top applied patch qrefresh
28
28
29 By default, mq will automatically use git patches when required to
29 By default, mq will automatically use git patches when required to
30 avoid losing file mode changes, copy records, binary files or empty
30 avoid losing file mode changes, copy records, binary files or empty
31 files creations or deletions. This behaviour can be configured with::
31 files creations or deletions. This behaviour can be configured with::
32
32
33 [mq]
33 [mq]
34 git = auto/keep/yes/no
34 git = auto/keep/yes/no
35
35
36 If set to 'keep', mq will obey the [diff] section configuration while
36 If set to 'keep', mq will obey the [diff] section configuration while
37 preserving existing git patches upon qrefresh. If set to 'yes' or
37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 'no', mq will override the [diff] section and always generate git or
38 'no', mq will override the [diff] section and always generate git or
39 regular patches, possibly losing data in the second case.
39 regular patches, possibly losing data in the second case.
40
40
41 You will by default be managing a patch queue named "patches". You can
41 You will by default be managing a patch queue named "patches". You can
42 create other, independent patch queues with the :hg:`qqueue` command.
42 create other, independent patch queues with the :hg:`qqueue` command.
43 '''
43 '''
44
44
45 from mercurial.i18n import _
45 from mercurial.i18n import _
46 from mercurial.node import bin, hex, short, nullid, nullrev
46 from mercurial.node import bin, hex, short, nullid, nullrev
47 from mercurial.lock import release
47 from mercurial.lock import release
48 from mercurial import commands, cmdutil, hg, patch, util
48 from mercurial import commands, cmdutil, hg, patch, util
49 from mercurial import repair, extensions, url, error
49 from mercurial import repair, extensions, url, error
50 import os, sys, re, errno
50 import os, sys, re, errno
51
51
52 commands.norepo += " qclone"
52 commands.norepo += " qclone"
53
53
54 # Patch names looks like unix-file names.
54 # Patch names looks like unix-file names.
55 # They must be joinable with queue directory and result in the patch path.
55 # They must be joinable with queue directory and result in the patch path.
56 normname = util.normpath
56 normname = util.normpath
57
57
58 class statusentry(object):
58 class statusentry(object):
59 def __init__(self, node, name):
59 def __init__(self, node, name):
60 self.node, self.name = node, name
60 self.node, self.name = node, name
61 def __repr__(self):
61 def __repr__(self):
62 return hex(self.node) + ':' + self.name
62 return hex(self.node) + ':' + self.name
63
63
64 class patchheader(object):
64 class patchheader(object):
65 def __init__(self, pf, plainmode=False):
65 def __init__(self, pf, plainmode=False):
66 def eatdiff(lines):
66 def eatdiff(lines):
67 while lines:
67 while lines:
68 l = lines[-1]
68 l = lines[-1]
69 if (l.startswith("diff -") or
69 if (l.startswith("diff -") or
70 l.startswith("Index:") or
70 l.startswith("Index:") or
71 l.startswith("===========")):
71 l.startswith("===========")):
72 del lines[-1]
72 del lines[-1]
73 else:
73 else:
74 break
74 break
75 def eatempty(lines):
75 def eatempty(lines):
76 while lines:
76 while lines:
77 if not lines[-1].strip():
77 if not lines[-1].strip():
78 del lines[-1]
78 del lines[-1]
79 else:
79 else:
80 break
80 break
81
81
82 message = []
82 message = []
83 comments = []
83 comments = []
84 user = None
84 user = None
85 date = None
85 date = None
86 parent = None
86 parent = None
87 format = None
87 format = None
88 subject = None
88 subject = None
89 diffstart = 0
89 diffstart = 0
90
90
91 for line in file(pf):
91 for line in file(pf):
92 line = line.rstrip()
92 line = line.rstrip()
93 if (line.startswith('diff --git')
93 if (line.startswith('diff --git')
94 or (diffstart and line.startswith('+++ '))):
94 or (diffstart and line.startswith('+++ '))):
95 diffstart = 2
95 diffstart = 2
96 break
96 break
97 diffstart = 0 # reset
97 diffstart = 0 # reset
98 if line.startswith("--- "):
98 if line.startswith("--- "):
99 diffstart = 1
99 diffstart = 1
100 continue
100 continue
101 elif format == "hgpatch":
101 elif format == "hgpatch":
102 # parse values when importing the result of an hg export
102 # parse values when importing the result of an hg export
103 if line.startswith("# User "):
103 if line.startswith("# User "):
104 user = line[7:]
104 user = line[7:]
105 elif line.startswith("# Date "):
105 elif line.startswith("# Date "):
106 date = line[7:]
106 date = line[7:]
107 elif line.startswith("# Parent "):
107 elif line.startswith("# Parent "):
108 parent = line[9:]
108 parent = line[9:]
109 elif not line.startswith("# ") and line:
109 elif not line.startswith("# ") and line:
110 message.append(line)
110 message.append(line)
111 format = None
111 format = None
112 elif line == '# HG changeset patch':
112 elif line == '# HG changeset patch':
113 message = []
113 message = []
114 format = "hgpatch"
114 format = "hgpatch"
115 elif (format != "tagdone" and (line.startswith("Subject: ") or
115 elif (format != "tagdone" and (line.startswith("Subject: ") or
116 line.startswith("subject: "))):
116 line.startswith("subject: "))):
117 subject = line[9:]
117 subject = line[9:]
118 format = "tag"
118 format = "tag"
119 elif (format != "tagdone" and (line.startswith("From: ") or
119 elif (format != "tagdone" and (line.startswith("From: ") or
120 line.startswith("from: "))):
120 line.startswith("from: "))):
121 user = line[6:]
121 user = line[6:]
122 format = "tag"
122 format = "tag"
123 elif (format != "tagdone" and (line.startswith("Date: ") or
123 elif (format != "tagdone" and (line.startswith("Date: ") or
124 line.startswith("date: "))):
124 line.startswith("date: "))):
125 date = line[6:]
125 date = line[6:]
126 format = "tag"
126 format = "tag"
127 elif format == "tag" and line == "":
127 elif format == "tag" and line == "":
128 # when looking for tags (subject: from: etc) they
128 # when looking for tags (subject: from: etc) they
129 # end once you find a blank line in the source
129 # end once you find a blank line in the source
130 format = "tagdone"
130 format = "tagdone"
131 elif message or line:
131 elif message or line:
132 message.append(line)
132 message.append(line)
133 comments.append(line)
133 comments.append(line)
134
134
135 eatdiff(message)
135 eatdiff(message)
136 eatdiff(comments)
136 eatdiff(comments)
137 eatempty(message)
137 eatempty(message)
138 eatempty(comments)
138 eatempty(comments)
139
139
140 # make sure message isn't empty
140 # make sure message isn't empty
141 if format and format.startswith("tag") and subject:
141 if format and format.startswith("tag") and subject:
142 message.insert(0, "")
142 message.insert(0, "")
143 message.insert(0, subject)
143 message.insert(0, subject)
144
144
145 self.message = message
145 self.message = message
146 self.comments = comments
146 self.comments = comments
147 self.user = user
147 self.user = user
148 self.date = date
148 self.date = date
149 self.parent = parent
149 self.parent = parent
150 self.haspatch = diffstart > 1
150 self.haspatch = diffstart > 1
151 self.plainmode = plainmode
151 self.plainmode = plainmode
152
152
153 def setuser(self, user):
153 def setuser(self, user):
154 if not self.updateheader(['From: ', '# User '], user):
154 if not self.updateheader(['From: ', '# User '], user):
155 try:
155 try:
156 patchheaderat = self.comments.index('# HG changeset patch')
156 patchheaderat = self.comments.index('# HG changeset patch')
157 self.comments.insert(patchheaderat + 1, '# User ' + user)
157 self.comments.insert(patchheaderat + 1, '# User ' + user)
158 except ValueError:
158 except ValueError:
159 if self.plainmode or self._hasheader(['Date: ']):
159 if self.plainmode or self._hasheader(['Date: ']):
160 self.comments = ['From: ' + user] + self.comments
160 self.comments = ['From: ' + user] + self.comments
161 else:
161 else:
162 tmp = ['# HG changeset patch', '# User ' + user, '']
162 tmp = ['# HG changeset patch', '# User ' + user, '']
163 self.comments = tmp + self.comments
163 self.comments = tmp + self.comments
164 self.user = user
164 self.user = user
165
165
166 def setdate(self, date):
166 def setdate(self, date):
167 if not self.updateheader(['Date: ', '# Date '], date):
167 if not self.updateheader(['Date: ', '# Date '], date):
168 try:
168 try:
169 patchheaderat = self.comments.index('# HG changeset patch')
169 patchheaderat = self.comments.index('# HG changeset patch')
170 self.comments.insert(patchheaderat + 1, '# Date ' + date)
170 self.comments.insert(patchheaderat + 1, '# Date ' + date)
171 except ValueError:
171 except ValueError:
172 if self.plainmode or self._hasheader(['From: ']):
172 if self.plainmode or self._hasheader(['From: ']):
173 self.comments = ['Date: ' + date] + self.comments
173 self.comments = ['Date: ' + date] + self.comments
174 else:
174 else:
175 tmp = ['# HG changeset patch', '# Date ' + date, '']
175 tmp = ['# HG changeset patch', '# Date ' + date, '']
176 self.comments = tmp + self.comments
176 self.comments = tmp + self.comments
177 self.date = date
177 self.date = date
178
178
179 def setparent(self, parent):
179 def setparent(self, parent):
180 if not self.updateheader(['# Parent '], parent):
180 if not self.updateheader(['# Parent '], parent):
181 try:
181 try:
182 patchheaderat = self.comments.index('# HG changeset patch')
182 patchheaderat = self.comments.index('# HG changeset patch')
183 self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
183 self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
184 except ValueError:
184 except ValueError:
185 pass
185 pass
186 self.parent = parent
186 self.parent = parent
187
187
188 def setmessage(self, message):
188 def setmessage(self, message):
189 if self.comments:
189 if self.comments:
190 self._delmsg()
190 self._delmsg()
191 self.message = [message]
191 self.message = [message]
192 self.comments += self.message
192 self.comments += self.message
193
193
194 def updateheader(self, prefixes, new):
194 def updateheader(self, prefixes, new):
195 '''Update all references to a field in the patch header.
195 '''Update all references to a field in the patch header.
196 Return whether the field is present.'''
196 Return whether the field is present.'''
197 res = False
197 res = False
198 for prefix in prefixes:
198 for prefix in prefixes:
199 for i in xrange(len(self.comments)):
199 for i in xrange(len(self.comments)):
200 if self.comments[i].startswith(prefix):
200 if self.comments[i].startswith(prefix):
201 self.comments[i] = prefix + new
201 self.comments[i] = prefix + new
202 res = True
202 res = True
203 break
203 break
204 return res
204 return res
205
205
206 def _hasheader(self, prefixes):
206 def _hasheader(self, prefixes):
207 '''Check if a header starts with any of the given prefixes.'''
207 '''Check if a header starts with any of the given prefixes.'''
208 for prefix in prefixes:
208 for prefix in prefixes:
209 for comment in self.comments:
209 for comment in self.comments:
210 if comment.startswith(prefix):
210 if comment.startswith(prefix):
211 return True
211 return True
212 return False
212 return False
213
213
214 def __str__(self):
214 def __str__(self):
215 if not self.comments:
215 if not self.comments:
216 return ''
216 return ''
217 return '\n'.join(self.comments) + '\n\n'
217 return '\n'.join(self.comments) + '\n\n'
218
218
219 def _delmsg(self):
219 def _delmsg(self):
220 '''Remove existing message, keeping the rest of the comments fields.
220 '''Remove existing message, keeping the rest of the comments fields.
221 If comments contains 'subject: ', message will prepend
221 If comments contains 'subject: ', message will prepend
222 the field and a blank line.'''
222 the field and a blank line.'''
223 if self.message:
223 if self.message:
224 subj = 'subject: ' + self.message[0].lower()
224 subj = 'subject: ' + self.message[0].lower()
225 for i in xrange(len(self.comments)):
225 for i in xrange(len(self.comments)):
226 if subj == self.comments[i].lower():
226 if subj == self.comments[i].lower():
227 del self.comments[i]
227 del self.comments[i]
228 self.message = self.message[2:]
228 self.message = self.message[2:]
229 break
229 break
230 ci = 0
230 ci = 0
231 for mi in self.message:
231 for mi in self.message:
232 while mi != self.comments[ci]:
232 while mi != self.comments[ci]:
233 ci += 1
233 ci += 1
234 del self.comments[ci]
234 del self.comments[ci]
235
235
236 class queue(object):
236 class queue(object):
237 def __init__(self, ui, path, patchdir=None):
237 def __init__(self, ui, path, patchdir=None):
238 self.basepath = path
238 self.basepath = path
239 try:
239 try:
240 fh = open(os.path.join(path, 'patches.queue'))
240 fh = open(os.path.join(path, 'patches.queue'))
241 cur = fh.read().rstrip()
241 cur = fh.read().rstrip()
242 if not cur:
242 if not cur:
243 curpath = os.path.join(path, 'patches')
243 curpath = os.path.join(path, 'patches')
244 else:
244 else:
245 curpath = os.path.join(path, 'patches-' + cur)
245 curpath = os.path.join(path, 'patches-' + cur)
246 except IOError:
246 except IOError:
247 curpath = os.path.join(path, 'patches')
247 curpath = os.path.join(path, 'patches')
248 self.path = patchdir or curpath
248 self.path = patchdir or curpath
249 self.opener = util.opener(self.path)
249 self.opener = util.opener(self.path)
250 self.ui = ui
250 self.ui = ui
251 self.applied_dirty = 0
251 self.applied_dirty = 0
252 self.series_dirty = 0
252 self.series_dirty = 0
253 self.added = []
253 self.added = []
254 self.series_path = "series"
254 self.series_path = "series"
255 self.status_path = "status"
255 self.status_path = "status"
256 self.guards_path = "guards"
256 self.guards_path = "guards"
257 self.active_guards = None
257 self.active_guards = None
258 self.guards_dirty = False
258 self.guards_dirty = False
259 # Handle mq.git as a bool with extended values
259 # Handle mq.git as a bool with extended values
260 try:
260 try:
261 gitmode = ui.configbool('mq', 'git', None)
261 gitmode = ui.configbool('mq', 'git', None)
262 if gitmode is None:
262 if gitmode is None:
263 raise error.ConfigError()
263 raise error.ConfigError()
264 self.gitmode = gitmode and 'yes' or 'no'
264 self.gitmode = gitmode and 'yes' or 'no'
265 except error.ConfigError:
265 except error.ConfigError:
266 self.gitmode = ui.config('mq', 'git', 'auto').lower()
266 self.gitmode = ui.config('mq', 'git', 'auto').lower()
267 self.plainmode = ui.configbool('mq', 'plain', False)
267 self.plainmode = ui.configbool('mq', 'plain', False)
268
268
269 @util.propertycache
269 @util.propertycache
270 def applied(self):
270 def applied(self):
271 if os.path.exists(self.join(self.status_path)):
271 if os.path.exists(self.join(self.status_path)):
272 def parse(l):
272 def parse(l):
273 n, name = l.split(':', 1)
273 n, name = l.split(':', 1)
274 return statusentry(bin(n), name)
274 return statusentry(bin(n), name)
275 lines = self.opener(self.status_path).read().splitlines()
275 lines = self.opener(self.status_path).read().splitlines()
276 return [parse(l) for l in lines]
276 return [parse(l) for l in lines]
277 return []
277 return []
278
278
279 @util.propertycache
279 @util.propertycache
280 def full_series(self):
280 def full_series(self):
281 if os.path.exists(self.join(self.series_path)):
281 if os.path.exists(self.join(self.series_path)):
282 return self.opener(self.series_path).read().splitlines()
282 return self.opener(self.series_path).read().splitlines()
283 return []
283 return []
284
284
285 @util.propertycache
285 @util.propertycache
286 def series(self):
286 def series(self):
287 self.parse_series()
287 self.parse_series()
288 return self.series
288 return self.series
289
289
290 @util.propertycache
290 @util.propertycache
291 def series_guards(self):
291 def series_guards(self):
292 self.parse_series()
292 self.parse_series()
293 return self.series_guards
293 return self.series_guards
294
294
295 def invalidate(self):
295 def invalidate(self):
296 for a in 'applied full_series series series_guards'.split():
296 for a in 'applied full_series series series_guards'.split():
297 if a in self.__dict__:
297 if a in self.__dict__:
298 delattr(self, a)
298 delattr(self, a)
299 self.applied_dirty = 0
299 self.applied_dirty = 0
300 self.series_dirty = 0
300 self.series_dirty = 0
301 self.guards_dirty = False
301 self.guards_dirty = False
302 self.active_guards = None
302 self.active_guards = None
303
303
304 def diffopts(self, opts={}, patchfn=None):
304 def diffopts(self, opts={}, patchfn=None):
305 diffopts = patch.diffopts(self.ui, opts)
305 diffopts = patch.diffopts(self.ui, opts)
306 if self.gitmode == 'auto':
306 if self.gitmode == 'auto':
307 diffopts.upgrade = True
307 diffopts.upgrade = True
308 elif self.gitmode == 'keep':
308 elif self.gitmode == 'keep':
309 pass
309 pass
310 elif self.gitmode in ('yes', 'no'):
310 elif self.gitmode in ('yes', 'no'):
311 diffopts.git = self.gitmode == 'yes'
311 diffopts.git = self.gitmode == 'yes'
312 else:
312 else:
313 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
313 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
314 ' got %s') % self.gitmode)
314 ' got %s') % self.gitmode)
315 if patchfn:
315 if patchfn:
316 diffopts = self.patchopts(diffopts, patchfn)
316 diffopts = self.patchopts(diffopts, patchfn)
317 return diffopts
317 return diffopts
318
318
319 def patchopts(self, diffopts, *patches):
319 def patchopts(self, diffopts, *patches):
320 """Return a copy of input diff options with git set to true if
320 """Return a copy of input diff options with git set to true if
321 referenced patch is a git patch and should be preserved as such.
321 referenced patch is a git patch and should be preserved as such.
322 """
322 """
323 diffopts = diffopts.copy()
323 diffopts = diffopts.copy()
324 if not diffopts.git and self.gitmode == 'keep':
324 if not diffopts.git and self.gitmode == 'keep':
325 for patchfn in patches:
325 for patchfn in patches:
326 patchf = self.opener(patchfn, 'r')
326 patchf = self.opener(patchfn, 'r')
327 # if the patch was a git patch, refresh it as a git patch
327 # if the patch was a git patch, refresh it as a git patch
328 for line in patchf:
328 for line in patchf:
329 if line.startswith('diff --git'):
329 if line.startswith('diff --git'):
330 diffopts.git = True
330 diffopts.git = True
331 break
331 break
332 patchf.close()
332 patchf.close()
333 return diffopts
333 return diffopts
334
334
335 def join(self, *p):
335 def join(self, *p):
336 return os.path.join(self.path, *p)
336 return os.path.join(self.path, *p)
337
337
338 def find_series(self, patch):
338 def find_series(self, patch):
339 def matchpatch(l):
339 def matchpatch(l):
340 l = l.split('#', 1)[0]
340 l = l.split('#', 1)[0]
341 return l.strip() == patch
341 return l.strip() == patch
342 for index, l in enumerate(self.full_series):
342 for index, l in enumerate(self.full_series):
343 if matchpatch(l):
343 if matchpatch(l):
344 return index
344 return index
345 return None
345 return None
346
346
347 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
347 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
348
348
349 def parse_series(self):
349 def parse_series(self):
350 self.series = []
350 self.series = []
351 self.series_guards = []
351 self.series_guards = []
352 for l in self.full_series:
352 for l in self.full_series:
353 h = l.find('#')
353 h = l.find('#')
354 if h == -1:
354 if h == -1:
355 patch = l
355 patch = l
356 comment = ''
356 comment = ''
357 elif h == 0:
357 elif h == 0:
358 continue
358 continue
359 else:
359 else:
360 patch = l[:h]
360 patch = l[:h]
361 comment = l[h:]
361 comment = l[h:]
362 patch = patch.strip()
362 patch = patch.strip()
363 if patch:
363 if patch:
364 if patch in self.series:
364 if patch in self.series:
365 raise util.Abort(_('%s appears more than once in %s') %
365 raise util.Abort(_('%s appears more than once in %s') %
366 (patch, self.join(self.series_path)))
366 (patch, self.join(self.series_path)))
367 self.series.append(patch)
367 self.series.append(patch)
368 self.series_guards.append(self.guard_re.findall(comment))
368 self.series_guards.append(self.guard_re.findall(comment))
369
369
370 def check_guard(self, guard):
370 def check_guard(self, guard):
371 if not guard:
371 if not guard:
372 return _('guard cannot be an empty string')
372 return _('guard cannot be an empty string')
373 bad_chars = '# \t\r\n\f'
373 bad_chars = '# \t\r\n\f'
374 first = guard[0]
374 first = guard[0]
375 if first in '-+':
375 if first in '-+':
376 return (_('guard %r starts with invalid character: %r') %
376 return (_('guard %r starts with invalid character: %r') %
377 (guard, first))
377 (guard, first))
378 for c in bad_chars:
378 for c in bad_chars:
379 if c in guard:
379 if c in guard:
380 return _('invalid character in guard %r: %r') % (guard, c)
380 return _('invalid character in guard %r: %r') % (guard, c)
381
381
382 def set_active(self, guards):
382 def set_active(self, guards):
383 for guard in guards:
383 for guard in guards:
384 bad = self.check_guard(guard)
384 bad = self.check_guard(guard)
385 if bad:
385 if bad:
386 raise util.Abort(bad)
386 raise util.Abort(bad)
387 guards = sorted(set(guards))
387 guards = sorted(set(guards))
388 self.ui.debug('active guards: %s\n' % ' '.join(guards))
388 self.ui.debug('active guards: %s\n' % ' '.join(guards))
389 self.active_guards = guards
389 self.active_guards = guards
390 self.guards_dirty = True
390 self.guards_dirty = True
391
391
392 def active(self):
392 def active(self):
393 if self.active_guards is None:
393 if self.active_guards is None:
394 self.active_guards = []
394 self.active_guards = []
395 try:
395 try:
396 guards = self.opener(self.guards_path).read().split()
396 guards = self.opener(self.guards_path).read().split()
397 except IOError, err:
397 except IOError, err:
398 if err.errno != errno.ENOENT:
398 if err.errno != errno.ENOENT:
399 raise
399 raise
400 guards = []
400 guards = []
401 for i, guard in enumerate(guards):
401 for i, guard in enumerate(guards):
402 bad = self.check_guard(guard)
402 bad = self.check_guard(guard)
403 if bad:
403 if bad:
404 self.ui.warn('%s:%d: %s\n' %
404 self.ui.warn('%s:%d: %s\n' %
405 (self.join(self.guards_path), i + 1, bad))
405 (self.join(self.guards_path), i + 1, bad))
406 else:
406 else:
407 self.active_guards.append(guard)
407 self.active_guards.append(guard)
408 return self.active_guards
408 return self.active_guards
409
409
410 def set_guards(self, idx, guards):
410 def set_guards(self, idx, guards):
411 for g in guards:
411 for g in guards:
412 if len(g) < 2:
412 if len(g) < 2:
413 raise util.Abort(_('guard %r too short') % g)
413 raise util.Abort(_('guard %r too short') % g)
414 if g[0] not in '-+':
414 if g[0] not in '-+':
415 raise util.Abort(_('guard %r starts with invalid char') % g)
415 raise util.Abort(_('guard %r starts with invalid char') % g)
416 bad = self.check_guard(g[1:])
416 bad = self.check_guard(g[1:])
417 if bad:
417 if bad:
418 raise util.Abort(bad)
418 raise util.Abort(bad)
419 drop = self.guard_re.sub('', self.full_series[idx])
419 drop = self.guard_re.sub('', self.full_series[idx])
420 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
420 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
421 self.parse_series()
421 self.parse_series()
422 self.series_dirty = True
422 self.series_dirty = True
423
423
424 def pushable(self, idx):
424 def pushable(self, idx):
425 if isinstance(idx, str):
425 if isinstance(idx, str):
426 idx = self.series.index(idx)
426 idx = self.series.index(idx)
427 patchguards = self.series_guards[idx]
427 patchguards = self.series_guards[idx]
428 if not patchguards:
428 if not patchguards:
429 return True, None
429 return True, None
430 guards = self.active()
430 guards = self.active()
431 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
431 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
432 if exactneg:
432 if exactneg:
433 return False, exactneg[0]
433 return False, exactneg[0]
434 pos = [g for g in patchguards if g[0] == '+']
434 pos = [g for g in patchguards if g[0] == '+']
435 exactpos = [g for g in pos if g[1:] in guards]
435 exactpos = [g for g in pos if g[1:] in guards]
436 if pos:
436 if pos:
437 if exactpos:
437 if exactpos:
438 return True, exactpos[0]
438 return True, exactpos[0]
439 return False, pos
439 return False, pos
440 return True, ''
440 return True, ''
441
441
442 def explain_pushable(self, idx, all_patches=False):
442 def explain_pushable(self, idx, all_patches=False):
443 write = all_patches and self.ui.write or self.ui.warn
443 write = all_patches and self.ui.write or self.ui.warn
444 if all_patches or self.ui.verbose:
444 if all_patches or self.ui.verbose:
445 if isinstance(idx, str):
445 if isinstance(idx, str):
446 idx = self.series.index(idx)
446 idx = self.series.index(idx)
447 pushable, why = self.pushable(idx)
447 pushable, why = self.pushable(idx)
448 if all_patches and pushable:
448 if all_patches and pushable:
449 if why is None:
449 if why is None:
450 write(_('allowing %s - no guards in effect\n') %
450 write(_('allowing %s - no guards in effect\n') %
451 self.series[idx])
451 self.series[idx])
452 else:
452 else:
453 if not why:
453 if not why:
454 write(_('allowing %s - no matching negative guards\n') %
454 write(_('allowing %s - no matching negative guards\n') %
455 self.series[idx])
455 self.series[idx])
456 else:
456 else:
457 write(_('allowing %s - guarded by %r\n') %
457 write(_('allowing %s - guarded by %r\n') %
458 (self.series[idx], why))
458 (self.series[idx], why))
459 if not pushable:
459 if not pushable:
460 if why:
460 if why:
461 write(_('skipping %s - guarded by %r\n') %
461 write(_('skipping %s - guarded by %r\n') %
462 (self.series[idx], why))
462 (self.series[idx], why))
463 else:
463 else:
464 write(_('skipping %s - no matching guards\n') %
464 write(_('skipping %s - no matching guards\n') %
465 self.series[idx])
465 self.series[idx])
466
466
467 def save_dirty(self):
467 def save_dirty(self):
468 def write_list(items, path):
468 def write_list(items, path):
469 fp = self.opener(path, 'w')
469 fp = self.opener(path, 'w')
470 for i in items:
470 for i in items:
471 fp.write("%s\n" % i)
471 fp.write("%s\n" % i)
472 fp.close()
472 fp.close()
473 if self.applied_dirty:
473 if self.applied_dirty:
474 write_list(map(str, self.applied), self.status_path)
474 write_list(map(str, self.applied), self.status_path)
475 if self.series_dirty:
475 if self.series_dirty:
476 write_list(self.full_series, self.series_path)
476 write_list(self.full_series, self.series_path)
477 if self.guards_dirty:
477 if self.guards_dirty:
478 write_list(self.active_guards, self.guards_path)
478 write_list(self.active_guards, self.guards_path)
479 if self.added:
479 if self.added:
480 qrepo = self.qrepo()
480 qrepo = self.qrepo()
481 if qrepo:
481 if qrepo:
482 qrepo[None].add(self.added)
482 qrepo[None].add(self.added)
483 self.added = []
483 self.added = []
484
484
485 def removeundo(self, repo):
485 def removeundo(self, repo):
486 undo = repo.sjoin('undo')
486 undo = repo.sjoin('undo')
487 if not os.path.exists(undo):
487 if not os.path.exists(undo):
488 return
488 return
489 try:
489 try:
490 os.unlink(undo)
490 os.unlink(undo)
491 except OSError, inst:
491 except OSError, inst:
492 self.ui.warn(_('error removing undo: %s\n') % str(inst))
492 self.ui.warn(_('error removing undo: %s\n') % str(inst))
493
493
494 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
494 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
495 fp=None, changes=None, opts={}):
495 fp=None, changes=None, opts={}):
496 stat = opts.get('stat')
496 stat = opts.get('stat')
497 m = cmdutil.match(repo, files, opts)
497 m = cmdutil.match(repo, files, opts)
498 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
498 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
499 changes, stat, fp)
499 changes, stat, fp)
500
500
def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
    """Apply a single patch from mergeq on top of head, merging if needed.

    Returns (err, node): err is 0 on success and node is the resulting
    changeset (node is None when the initial apply failed before
    committing anything).
    """
    # first try just applying the patch
    (err, n) = self.apply(repo, [patch], update_status=False,
                          strict=True, merge=rev)

    if err == 0:
        return (err, n)

    if n is None:
        raise util.Abort(_("apply failed for patch %s") % patch)

    self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

    # apply failed, strip away that rev and merge.
    hg.clean(repo, head)
    self.strip(repo, n, update=False, backup='strip')

    # merge the old version of the patch in and commit the result,
    # reusing the original description and user
    ctx = repo[rev]
    ret = hg.merge(repo, rev)
    if ret:
        raise util.Abort(_("update returned %d") % ret)
    n = repo.commit(ctx.description(), ctx.user(), force=True)
    if n is None:
        raise util.Abort(_("repo commit failed"))
    try:
        ph = patchheader(mergeq.join(patch), self.plainmode)
    except:
        raise util.Abort(_("unable to read %s") % patch)

    # rewrite the patch file with the merged diff (head -> n), keeping
    # the original header comments
    diffopts = self.patchopts(diffopts, patch)
    patchf = self.opener(patch, "w")
    comments = str(ph)
    if comments:
        patchf.write(comments)
    self.printdiff(repo, diffopts, head, n, fp=patchf)
    patchf.close()
    self.removeundo(repo)
    return (0, n)
539
539
def qparents(self, repo, rev=None):
    """Return the parent revision qrefresh/qdiff should diff against.

    With rev=None the working directory parents are inspected,
    otherwise the parents of rev.  A second parent that belongs to the
    applied patch stack is preferred over the first parent.
    """
    if rev is None:
        first, second = repo.dirstate.parents()
        if second == nullid:
            return first
        # merge in progress: the queue parent is the top applied patch
        if self.applied:
            return self.applied[-1].node
        return None

    first, second = repo.changelog.parents(rev)
    applied_nodes = [entry.node for entry in self.applied]
    if second != nullid and second in applied_nodes:
        return second
    return first
552
552
def mergepatch(self, repo, mergeq, series, diffopts):
    """Pull each patch of series from mergeq into this queue, merging
    the result (qpush --merge).

    Returns (err, head) where head is the node of the last committed
    patch; err is non-zero on lookup/apply failure (see mergeone).
    """
    if not self.applied:
        # each of the patches merged in will have two parents.  This
        # can confuse the qrefresh, qdiff, and strip code because it
        # needs to know which parent is actually in the patch queue.
        # so, we insert a merge marker with only one parent.  This way
        # the first patch in the queue is never a merge patch
        #
        pname = ".hg.patches.merge.marker"
        n = repo.commit('[mq]: merge marker', force=True)
        self.removeundo(repo)
        self.applied.append(statusentry(n, pname))
        self.applied_dirty = 1

    head = self.qparents(repo)

    for patch in series:
        patch = mergeq.lookup(patch, strict=True)
        if not patch:
            self.ui.warn(_("patch %s does not exist\n") % patch)
            return (1, None)
        pushable, reason = self.pushable(patch)
        if not pushable:
            # guarded patch: explain and skip it
            self.explain_pushable(patch, all_patches=True)
            continue
        info = mergeq.isapplied(patch)
        if not info:
            self.ui.warn(_("patch %s is not applied\n") % patch)
            return (1, None)
        rev = info[1]
        err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
        if head:
            self.applied.append(statusentry(head, patch))
            self.applied_dirty = 1
        if err:
            return (err, head)
    self.save_dirty()
    return (0, head)
591
591
def patch(self, repo, patchfile):
    '''Apply patchfile to the working directory.
    patchfile: name of patch file

    Returns (success, files, fuzz): success is False when the patch
    could not be applied (the error is reported, not raised, so
    callers such as _apply can commit the rejects), files collects the
    touched paths, fuzz is true when hunks applied with fuzz.'''
    files = {}
    try:
        fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                           files=files, eolmode=None)
    except Exception, inst:
        # deliberately swallow the exception: report and signal failure
        # through the boolean result instead
        self.ui.note(str(inst) + '\n')
        if not self.ui.verbose:
            self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
        return (False, files, False)

    return (True, files, fuzz)
606
606
def apply(self, repo, series, list=False, update_status=True,
          strict=False, patchdir=None, merge=None, all_files=None):
    """Wrap _apply in wlock/lock and a 'qpush' transaction.

    On any failure the transaction is aborted and the repo/dirstate
    caches are invalidated before re-raising, so in-memory state does
    not reflect the rolled-back changes.  See _apply for the return
    value and parameters.
    """
    wlock = lock = tr = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction("qpush")
        try:
            ret = self._apply(repo, series, list, update_status,
                              strict, patchdir, merge, all_files=all_files)
            tr.close()
            self.save_dirty()
            return ret
        except:
            try:
                tr.abort()
            finally:
                # drop anything the aborted transaction left in caches
                repo.invalidate()
                repo.dirstate.invalidate()
            raise
    finally:
        release(tr, lock, wlock)
        self.removeundo(repo)
630
630
def _apply(self, repo, series, list=False, update_status=True,
           strict=False, patchdir=None, merge=None, all_files=None):
    '''returns (error, hash)
    error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz

    Applies each pushable patch of series to the working directory and
    commits it.  Must run inside the lock/transaction set up by
    apply(); stops at the first failing patch.
    '''
    # TODO unify with commands.py
    if not patchdir:
        patchdir = self.path
    err = 0
    n = None
    for patchname in series:
        pushable, reason = self.pushable(patchname)
        if not pushable:
            self.explain_pushable(patchname, all_patches=True)
            continue
        self.ui.status(_("applying %s\n") % patchname)
        pf = os.path.join(patchdir, patchname)

        try:
            ph = patchheader(self.join(patchname), self.plainmode)
        except:
            self.ui.warn(_("unable to read %s\n") % patchname)
            err = 1
            break

        message = ph.message
        if not message:
            message = "imported patch %s\n" % patchname
        else:
            # ph.message is a list of lines here
            if list:
                message.append("\nimported patch %s" % patchname)
            message = '\n'.join(message)

        if ph.haspatch:
            (patcherr, files, fuzz) = self.patch(repo, pf)
            if all_files is not None:
                all_files.update(files)
            patcherr = not patcherr
        else:
            self.ui.warn(_("patch %s is empty\n") % patchname)
            patcherr, files, fuzz = 0, [], 0

        if merge and files:
            # Mark as removed/merged and update dirstate parent info
            removed = []
            merged = []
            for f in files:
                if os.path.exists(repo.wjoin(f)):
                    merged.append(f)
                else:
                    removed.append(f)
            for f in removed:
                repo.dirstate.remove(f)
            for f in merged:
                repo.dirstate.merge(f)
            p1, p2 = repo.dirstate.parents()
            repo.dirstate.setparents(p1, merge)

        files = patch.updatedir(self.ui, repo, files)
        match = cmdutil.matchfiles(repo, files or [])
        # commit even when the patch failed, so the rejects can be
        # fixed up and qrefreshed into this changeset
        n = repo.commit(message, ph.user, ph.date, match=match, force=True)

        if n is None:
            raise util.Abort(_("repo commit failed"))

        if update_status:
            self.applied.append(statusentry(n, patchname))

        if patcherr:
            self.ui.warn(_("patch failed, rejects left in working dir\n"))
            err = 2
            break

        if fuzz and strict:
            self.ui.warn(_("fuzz found when applying patch, stopping\n"))
            err = 3
            break
    return (err, n)
708
708
def _cleanup(self, patches, numrevs, keep=False):
    """Forget patches: delete their files (unless keep is set), drop
    the first numrevs entries from the applied stack and prune the
    series file accordingly."""
    if not keep:
        qrepo = self.qrepo()
        if qrepo:
            # the patch directory is itself a repository: remove and
            # untrack the files through it
            qrepo[None].remove(patches, True)
        else:
            for name in patches:
                os.unlink(self.join(name))

    if numrevs:
        del self.applied[:numrevs]
        self.applied_dirty = 1

    # delete series entries from the bottom up so earlier indexes
    # remain valid while we mutate full_series
    indexes = [self.find_series(name) for name in patches]
    for idx in sorted(indexes, reverse=True):
        del self.full_series[idx]
    self.parse_series()
    self.series_dirty = 1
726
726
def _revpatches(self, repo, revs):
    """Map revisions to the names of the applied patches they carry.

    revs must be an ascending range starting at the bottom of the
    applied stack; aborts when a revision is below the stack or does
    not line up with the corresponding applied entry.
    """
    firstrev = repo[self.applied[0].node].rev()
    patches = []
    for i, rev in enumerate(revs):

        if rev < firstrev:
            raise util.Abort(_('revision %d is not managed') % rev)

        ctx = repo[rev]
        base = self.applied[i].node
        if ctx.node() != base:
            msg = _('cannot delete revision %d above applied patches')
            raise util.Abort(msg % rev)

        patch = self.applied[i].name
        for fmt in ('[mq]: %s', 'imported patch %s'):
            # warn when finalizing a patch whose changeset still has
            # only an auto-generated commit message
            if ctx.description() == fmt % patch:
                msg = _('patch %s finalized without changeset message\n')
                repo.ui.status(msg % patch)
                break

        patches.append(patch)
    return patches
750
750
def finish(self, repo, revs):
    """Move the patches for revs out of mq control (qfinish)."""
    patchnames = self._revpatches(repo, sorted(revs))
    self._cleanup(patchnames, len(patchnames))
754
754
def delete(self, repo, patches, opts):
    """Implement qdelete: forget the named patches and/or the patches
    belonging to the revisions in opts['rev'].

    Explicitly named patches must be unapplied and present in the
    series file; revisions are resolved through _revpatches, which
    enforces stack order.  opts['keep'] preserves the patch files.
    """
    if not patches and not opts.get('rev'):
        raise util.Abort(_('qdelete requires at least one revision or '
                           'patch name'))

    realpatches = []
    for patch in patches:
        patch = self.lookup(patch, strict=True)
        info = self.isapplied(patch)
        if info:
            raise util.Abort(_("cannot delete applied patch %s") % patch)
        if patch not in self.series:
            raise util.Abort(_("patch %s not in series file") % patch)
        realpatches.append(patch)

    numrevs = 0
    if opts.get('rev'):
        if not self.applied:
            raise util.Abort(_('no patches applied'))
        revs = cmdutil.revrange(repo, opts['rev'])
        # normalize a descending two-element range to ascending
        if len(revs) > 1 and revs[0] > revs[1]:
            revs.reverse()
        revpatches = self._revpatches(repo, revs)
        realpatches += revpatches
        numrevs = len(revpatches)

    self._cleanup(realpatches, numrevs, opts.get('keep'))
782
782
def check_toppatch(self, repo):
    """Ensure the working directory sits on the topmost applied patch.

    Returns (node, name) of the top applied patch, or (None, None)
    when nothing is applied.  Aborts if the working directory was
    updated away from qtip.
    """
    if not self.applied:
        return None, None
    top_entry = self.applied[-1]
    if top_entry.node not in repo.dirstate.parents():
        raise util.Abort(_("working directory revision is not qtip"))
    return top_entry.node, top_entry.name
792
792
def check_localchanges(self, repo, force=False, refresh=True):
    """Abort on uncommitted working-directory changes unless force.

    Returns the (modified, added, removed, deleted) file lists.
    refresh selects the error wording: True hints at qrefresh.
    """
    modified, added, removed, deleted = repo.status()[:4]
    if not force and (modified or added or removed or deleted):
        if refresh:
            message = _("local changes found, refresh first")
        else:
            message = _("local changes found")
        raise util.Abort(message)
    return modified, added, removed, deleted
801
801
# patch names that would collide with mq's own control files
_reserved = ('series', 'status', 'guards')

def check_reserved_name(self, name):
    """Abort if name cannot be used as a patch name: reserved control
    files, '.hg'/'.mq' prefixes, or '#'/':' which the series file
    syntax uses."""
    invalid = (name in self._reserved
               or name.startswith('.hg')
               or name.startswith('.mq')
               or '#' in name
               or ':' in name)
    if invalid:
        raise util.Abort(_('"%s" cannot be used as the name of a patch')
                         % name)
808
808
def new(self, repo, patchfn, *pats, **opts):
    """Implement qnew: create a new patch named patchfn on top of the
    current queue, capturing any pending changes into it.

    options:
    msg: a string or a no-argument function returning a string
    user/date: author information written into the patch header
    include/exclude (with pats): restrict which files are captured
    git: use git-style diffs
    """
    msg = opts.get('msg')
    user = opts.get('user')
    date = opts.get('date')
    if date:
        date = util.parsedate(date)
    diffopts = self.diffopts({'git': opts.get('git')})
    self.check_reserved_name(patchfn)
    if os.path.exists(self.join(patchfn)):
        raise util.Abort(_('patch "%s" already exists') % patchfn)
    if opts.get('include') or opts.get('exclude') or pats:
        match = cmdutil.match(repo, pats, opts)
        # detect missing files in pats
        def badfn(f, msg):
            raise util.Abort('%s: %s' % (f, msg))
        match.bad = badfn
        m, a, r, d = repo.status(match=match)[:4]
    else:
        # no file restriction: fold all pending changes into the patch
        m, a, r, d = self.check_localchanges(repo, force=True)
        match = cmdutil.matchfiles(repo, m + a + r)
    if len(repo[None].parents()) > 1:
        raise util.Abort(_('cannot manage merge changesets'))
    commitfiles = m + a + r
    self.check_toppatch(repo)
    insert = self.full_series_end()
    wlock = repo.wlock()
    try:
        # if patch file write fails, abort early
        p = self.opener(patchfn, "w")
        try:
            # write the patch header: plain mode uses mail-style
            # From:/Date: lines, otherwise '# HG changeset patch'
            if self.plainmode:
                if user:
                    p.write("From: " + user + "\n")
                    if not date:
                        p.write("\n")
                if date:
                    p.write("Date: %d %d\n\n" % date)
            else:
                p.write("# HG changeset patch\n")
                p.write("# Parent "
                        + hex(repo[None].parents()[0].node()) + "\n")
                if user:
                    p.write("# User " + user + "\n")
                if date:
                    p.write("# Date %s %s\n\n" % date)
            if hasattr(msg, '__call__'):
                msg = msg()
            commitmsg = msg and msg or ("[mq]: %s" % patchfn)
            n = repo.commit(commitmsg, user, date, match=match, force=True)
            if n is None:
                raise util.Abort(_("repo commit failed"))
            try:
                self.full_series[insert:insert] = [patchfn]
                self.applied.append(statusentry(n, patchfn))
                self.parse_series()
                self.series_dirty = 1
                self.applied_dirty = 1
                if msg:
                    msg = msg + "\n\n"
                    p.write(msg)
                if commitfiles:
                    parent = self.qparents(repo, n)
                    chunks = patch.diff(repo, node1=parent, node2=n,
                                        match=match, opts=diffopts)
                    for chunk in chunks:
                        p.write(chunk)
                p.close()
                wlock.release()
                wlock = None
                r = self.qrepo()
                if r:
                    r[None].add([patchfn])
            except:
                # queue state update failed: undo the commit made above
                repo.rollback()
                raise
        except Exception:
            # creation failed after the file was opened: remove the
            # half-written patch file before propagating the error
            patchpath = self.join(patchfn)
            try:
                os.unlink(patchpath)
            except:
                self.ui.warn(_('error unlinking %s\n') % patchpath)
            raise
        self.removeundo(repo)
    finally:
        release(wlock)
897
897
def strip(self, repo, rev, update=True, backup="all", force=None):
    """Remove rev and its descendants from the repository.

    update: first move the working directory to rev's queue parent
    backup: passed through to repair.strip ('all', 'strip', 'none')
    force: allow stripping despite local changes (only with update)
    """
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        if update:
            self.check_localchanges(repo, force=force, refresh=False)
            urev = self.qparents(repo, rev)
            hg.clean(repo, urev)
            repo.dirstate.write()

        self.removeundo(repo)
        repair.strip(self.ui, repo, rev, backup)
        # strip may have unbundled a set of backed up revisions after
        # the actual strip
        self.removeundo(repo)
    finally:
        release(lock, wlock)
917
917
def isapplied(self, patch):
    """Return (index, node, name) for an applied patch, else None."""
    for index, entry in enumerate(self.applied):
        if entry.name == patch:
            return (index, entry.node, entry.name)
    return None
924
924
# if the exact patch name does not exist, we try a few
# variations. If strict is passed, we try only #1
#
# 1) a number to indicate an offset in the series file
# 2) a unique substring of the patch name was given
# 3) patchname[-+]num to indicate an offset in the series file
def lookup(self, patch, strict=False):
    """Resolve a user-supplied patch name to an entry of self.series.

    Returns None when patch is None; aborts when nothing matches.
    """
    patch = patch and str(patch)

    def partial_name(s):
        # exact name, unique substring match, or the qtip/qbase
        # aliases; None when unknown or ambiguous
        if s in self.series:
            return s
        matches = [x for x in self.series if s in x]
        if len(matches) > 1:
            self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
            for m in matches:
                self.ui.warn(' %s\n' % m)
            return None
        if matches:
            return matches[0]
        if self.series and self.applied:
            if s == 'qtip':
                return self.series[self.series_end(True)-1]
            if s == 'qbase':
                return self.series[0]
        return None

    if patch is None:
        return None
    if patch in self.series:
        return patch

    # a bare number that is not a patch file names a series offset,
    # negative values counting from the end
    if not os.path.isfile(self.join(patch)):
        try:
            sno = int(patch)
        except (ValueError, OverflowError):
            pass
        else:
            if -len(self.series) <= sno < len(self.series):
                return self.series[sno]

    if not strict:
        res = partial_name(patch)
        if res:
            return res
        # name-num: num patches before name in the series
        minus = patch.rfind('-')
        if minus >= 0:
            res = partial_name(patch[:minus])
            if res:
                i = self.series.index(res)
                try:
                    off = int(patch[minus + 1:] or 1)
                except (ValueError, OverflowError):
                    pass
                else:
                    if i - off >= 0:
                        return self.series[i - off]
        # name+num: num patches after name in the series
        plus = patch.rfind('+')
        if plus >= 0:
            res = partial_name(patch[:plus])
            if res:
                i = self.series.index(res)
                try:
                    off = int(patch[plus + 1:] or 1)
                except (ValueError, OverflowError):
                    pass
                else:
                    if i + off < len(self.series):
                        return self.series[i + off]
    raise util.Abort(_("patch %s not in series") % patch)
995
995
def push(self, repo, patch=None, force=False, list=False,
         mergeq=None, all=False, move=False):
    """Implement qpush: apply unapplied patches up to and including
    patch (or just the next one when patch is None).

    all: push every remaining patch
    move: reorder patch to the front of the unapplied series first
    mergeq: merge the patches in from another queue (qpush --merge)
    force: skip the local-changes check
    Returns 0 on success or no-op, non-zero on error.
    """
    diffopts = self.diffopts()
    wlock = repo.wlock()
    try:
        heads = []
        for b, ls in repo.branchmap().iteritems():
            heads += ls
        if not heads:
            heads = [nullid]
        if repo.dirstate.parents()[0] not in heads:
            self.ui.status(_("(working directory not at a head)\n"))

        if not self.series:
            self.ui.warn(_('no patches in series\n'))
            return 0

        patch = self.lookup(patch)
        # Suppose our series file is: A B C and the current 'top'
        # patch is B. qpush C should be performed (moving forward)
        # qpush B is a NOP (no change) qpush A is an error (can't
        # go backwards with qpush)
        if patch:
            info = self.isapplied(patch)
            if info:
                if info[0] < len(self.applied) - 1:
                    raise util.Abort(
                        _("cannot push to a previous patch: %s") % patch)
                self.ui.warn(
                    _('qpush: %s is already at the top\n') % patch)
                return 0
            pushable, reason = self.pushable(patch)
            if not pushable:
                if reason:
                    reason = _('guarded by %r') % reason
                else:
                    reason = _('no matching guards')
                self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                return 1
        elif all:
            patch = self.series[-1]
            if self.isapplied(patch):
                self.ui.warn(_('all patches are currently applied\n'))
                return 0

        # Following the above example, starting at 'top' of B:
        # qpush should be performed (pushes C), but a subsequent
        # qpush without an argument is an error (nothing to
        # apply). This allows a loop of "...while hg qpush..." to
        # work as it detects an error when done
        start = self.series_end()
        if start == len(self.series):
            self.ui.warn(_('patch series already fully applied\n'))
            return 1
        if not force:
            self.check_localchanges(repo)

        if move:
            # relocate the requested patch to the front of the
            # unapplied part of the series so it gets pushed next
            try:
                index = self.series.index(patch, start)
                fullpatch = self.full_series[index]
                del self.full_series[index]
            except ValueError:
                raise util.Abort(_("patch '%s' not found") % patch)
            self.full_series.insert(start, fullpatch)
            self.parse_series()
            self.series_dirty = 1

        self.applied_dirty = 1
        if start > 0:
            self.check_toppatch(repo)
        if not patch:
            patch = self.series[start]
            end = start + 1
        else:
            end = self.series.index(patch, start) + 1

        s = self.series[start:end]
        all_files = set()
        try:
            if mergeq:
                ret = self.mergepatch(repo, mergeq, s, diffopts)
            else:
                ret = self.apply(repo, s, list, all_files=all_files)
        except:
            # apply blew up: restore the working directory
            self.ui.warn(_('cleaning up working directory...'))
            node = repo.dirstate.parents()[0]
            hg.revert(repo, node, None)
            # only remove unknown files that we know we touched or
            # created while patching
            for f in all_files:
                if f not in repo.dirstate:
                    try:
                        util.unlink(repo.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            self.ui.warn(_('done\n'))
            raise

        if not self.applied:
            return ret[0]
        top = self.applied[-1].name
        if ret[0] and ret[0] > 1:
            msg = _("errors during apply, please fix and refresh %s\n")
            self.ui.write(msg % top)
        else:
            self.ui.write(_("now at: %s\n") % top)
        return ret[0]

    finally:
        wlock.release()
1108
1108
    def pop(self, repo, patch=None, force=False, update=True, all=False):
        """Pop (unapply) patches from the queue.

        Pops everything above ``patch`` (inclusive of patches applied
        after it), a single patch when no name is given, or every
        applied patch when ``all`` is set.  The corresponding changesets
        are stripped and the working directory is synchronized with the
        new qtip's parent.  Returns a truthy value on error.
        """
        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    # no exact name match; try number/partial-name lookup
                    patch = self.lookup(patch)
                    info = self.isapplied(patch)
                if not info:
                    raise util.Abort(_("patch %s is not applied") % patch)

            if not self.applied:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                # cannot skip the working-copy update if a dirstate parent
                # is one of the revisions about to be stripped
                parents = repo.dirstate.parents()
                rr = [x.node for x in self.applied]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                # only update if the working directory actually sits on one
                # of the patches being popped
                parents = [p.node() for p in repo[None].parents()]
                needupdate = False
                for entry in self.applied[start:]:
                    if entry.node in parents:
                        needupdate = True
                        break
                update = needupdate

            if not force and update:
                self.check_localchanges(repo)

            self.applied_dirty = 1
            end = len(self.applied)
            rev = self.applied[start].node
            if update:
                top = self.check_toppatch(repo)[0]

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise util.Abort(_('trying to pop unknown node %s') % node)

            if heads != [self.applied[-1].node]:
                raise util.Abort(_("popping would remove a revision not "
                                   "managed by this patch queue"))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                ctx = repo[qp]
                m, a, r, d = repo.status(qp, top)[:4]
                if d:
                    raise util.Abort(_("deletions found between repo revs"))
                for f in a:
                    # files added by the popped patches must disappear from
                    # the working copy; ENOENT means they are already gone
                    try:
                        util.unlink(repo.wjoin(f))
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    repo.dirstate.forget(f)
                for f in m + r:
                    # restore modified/removed files from the new qtip parent
                    fctx = ctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                    repo.dirstate.normal(f)
                repo.dirstate.setparents(qp, nullid)
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_("popping %s\n") % patch.name)
            del self.applied[start:end]
            self.strip(repo, rev, update=False, backup='strip')
            if self.applied:
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
        finally:
            wlock.release()
1203
1203
1204 def diff(self, repo, pats, opts):
1204 def diff(self, repo, pats, opts):
1205 top, patch = self.check_toppatch(repo)
1205 top, patch = self.check_toppatch(repo)
1206 if not top:
1206 if not top:
1207 self.ui.write(_("no patches applied\n"))
1207 self.ui.write(_("no patches applied\n"))
1208 return
1208 return
1209 qp = self.qparents(repo, top)
1209 qp = self.qparents(repo, top)
1210 if opts.get('reverse'):
1210 if opts.get('reverse'):
1211 node1, node2 = None, qp
1211 node1, node2 = None, qp
1212 else:
1212 else:
1213 node1, node2 = qp, None
1213 node1, node2 = qp, None
1214 diffopts = self.diffopts(opts, patch)
1214 diffopts = self.diffopts(opts, patch)
1215 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1215 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1216
1216
    def refresh(self, repo, pats=None, **opts):
        """Regenerate the topmost applied patch from the working directory.

        Rewrites the patch file (message, user, date, parent header and
        diff), strips the old qtip changeset and commits a replacement
        on the same parent, keeping the dirstate consistent throughout.
        Returns 1 when no patches are applied.
        """
        if not self.applied:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            newdate = '%d %d' % util.parsedate(newdate)
        wlock = repo.wlock()

        try:
            self.check_toppatch(repo)
            (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
            if repo.changelog.heads(top) != [top]:
                raise util.Abort(_("cannot refresh a revision with children"))

            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)
            ph = patchheader(self.join(patchfn), self.plainmode)
            diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
            if msg:
                ph.setmessage(msg)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)
            ph.setparent(hex(patchparent))

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            comments = str(ph)
            if comments:
                patchf.write(comments)

            # update the dirstate in place, strip off the qtip commit
            # and then commit.
            #
            # this should really read:
            #   mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
            # but we do it backwards to take advantage of manifest/chlog
            # caching against the next repo.status call
            mm, aa, dd, aa2 = repo.status(patchparent, top)[:4]
            changes = repo.changelog.read(top)
            man = repo.manifest.read(changes[0])
            aaa = aa[:]
            matchfn = cmdutil.match(repo, pats, opts)
            # in short mode, we only diff the files included in the
            # patch already plus specified files
            if opts.get('short'):
                # if amending a patch, we start with existing
                # files plus specified files - unfiltered
                match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
                # filter with inc/exl options
                matchfn = cmdutil.match(repo, opts=opts)
            else:
                match = cmdutil.matchall(repo)
            m, a, r, d = repo.status(match=match)[:4]

            # we might end up with files that were added between
            # qtip and the dirstate parent, but then changed in the
            # local dirstate. in this case, we want them to only
            # show up in the added section
            for x in m:
                if x not in aa:
                    mm.append(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch. In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    del dd[dd.index(x)]
                    mm.append(x)
                else:
                    aa.append(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    del aa[aa.index(x)]
                    forget.append(x)
                    continue
                elif x in mm:
                    del mm[mm.index(x)]
                dd.append(x)

            # deduplicate the merged file lists before building the diff
            m = list(set(mm))
            r = list(set(dd))
            a = list(set(aa))
            c = [filter(matchfn, l) for l in (m, a, r)]
            match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
            chunks = patch.diff(repo, patchparent, match=match,
                                changes=c, opts=diffopts)
            for chunk in chunks:
                patchf.write(chunk)

            try:
                if diffopts.git or diffopts.upgrade:
                    copies = {}
                    for dst in a:
                        src = repo.dirstate.copied(dst)
                        # during qfold, the source file for copies may
                        # be removed. Treat this as a simple add.
                        if src is not None and src in repo.dirstate:
                            copies.setdefault(src, []).append(dst)
                        repo.dirstate.add(dst)
                    # remember the copies between patchparent and qtip
                    for dst in aaa:
                        f = repo.file(dst)
                        src = f.renamed(man[dst])
                        if src:
                            copies.setdefault(src[0], []).extend(
                                copies.get(dst, []))
                            if dst in a:
                                copies[src[0]].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                    for src, dsts in copies.iteritems():
                        for dst in dsts:
                            repo.dirstate.copy(src, dst)
                else:
                    for dst in a:
                        repo.dirstate.add(dst)
                    # Drop useless copy information
                    for f in list(repo.dirstate.copies()):
                        repo.dirstate.copy(None, f)
                for f in r:
                    repo.dirstate.remove(f)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in xrange(len(m)-1, -1, -1):
                    if not matchfn(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.normal(f)
                for f in mm:
                    repo.dirstate.normallookup(f)
                for f in forget:
                    repo.dirstate.forget(f)

                if not msg:
                    if not ph.message:
                        message = "[mq]: %s\n" % patchfn
                    else:
                        message = "\n".join(ph.message)
                else:
                    message = msg

                user = ph.user or changes[1]

                # assumes strip can roll itself back if interrupted
                repo.dirstate.setparents(*cparents)
                self.applied.pop()
                self.applied_dirty = 1
                self.strip(repo, top, update=False,
                           backup='strip')
            except:
                # any failure here leaves the dirstate half-edited;
                # throw the in-memory copy away before re-raising
                repo.dirstate.invalidate()
                raise

            try:
                # might be nice to attempt to roll back strip after this
                patchf.rename()
                n = repo.commit(message, user, ph.date, match=match,
                                force=True)
                self.applied.append(statusentry(n, patchfn))
            except:
                # old qtip is already stripped; rebuild the dirstate from
                # its parent and tell the user how to recover
                ctx = repo[cparents[0]]
                repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                self.save_dirty()
                self.ui.warn(_('refresh interrupted while patch was popped! '
                               '(revert --all, qpush to recover)\n'))
                raise
        finally:
            wlock.release()
            self.removeundo(repo)
1398
1398
    def init(self, repo, create=False):
        """Create the patch queue directory.

        With *create*, also initialize a repository inside it and return
        that repository.  Aborts when the directory already exists and
        *create* is not requested.
        """
        if not create and os.path.isdir(self.path):
            raise util.Abort(_("patch queue directory already exists"))
        try:
            os.mkdir(self.path)
        except OSError, inst:
            # a pre-existing directory is tolerated only when creating the
            # queue repository; anything else is a real error
            if inst.errno != errno.EEXIST or not create:
                raise
        if create:
            return self.qrepo(create=True)
1409
1409
1410 def unapplied(self, repo, patch=None):
1410 def unapplied(self, repo, patch=None):
1411 if patch and patch not in self.series:
1411 if patch and patch not in self.series:
1412 raise util.Abort(_("patch %s is not in series file") % patch)
1412 raise util.Abort(_("patch %s is not in series file") % patch)
1413 if not patch:
1413 if not patch:
1414 start = self.series_end()
1414 start = self.series_end()
1415 else:
1415 else:
1416 start = self.series.index(patch) + 1
1416 start = self.series.index(patch) + 1
1417 unapplied = []
1417 unapplied = []
1418 for i in xrange(start, len(self.series)):
1418 for i in xrange(start, len(self.series)):
1419 pushable, reason = self.pushable(i)
1419 pushable, reason = self.pushable(i)
1420 if pushable:
1420 if pushable:
1421 unapplied.append((i, self.series[i]))
1421 unapplied.append((i, self.series[i]))
1422 self.explain_pushable(i)
1422 self.explain_pushable(i)
1423 return unapplied
1423 return unapplied
1424
1424
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print the patch series.

        With *missing*, instead list files in the patch directory that
        are not tracked by the series file.  *status* restricts output
        to patches in the matching state ('A'pplied, 'U'napplied,
        'G'uarded); *summary* appends the first line of each patch
        message; verbose mode prefixes each line with index and state.
        """
        def displayname(pfx, patchname, state):
            # print one output line: optional prefix, name, optional summary
            if pfx:
                self.ui.write(pfx)
            if summary:
                ph = patchheader(self.join(patchname), self.plainmode)
                msg = ph.message and ph.message[0] or ''
                if self.ui.formatted():
                    # truncate the summary so the line fits the terminal
                    width = util.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = util.ellipsis(msg, width)
                    else:
                        msg = ''
                self.ui.write(patchname, label='qseries.' + state)
                self.ui.write(': ')
                self.ui.write(msg, label='qseries.message.' + state)
            else:
                self.ui.write(patchname, label='qseries.' + state)
            self.ui.write('\n')

        applied = set([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                # width of the largest printed index, for alignment
                idxwidth = len(str(start + length - 1))
            for i in xrange(start, start + length):
                patch = self.series[i]
                if patch in applied:
                    char, state = 'A', 'applied'
                elif self.pushable(i)[0]:
                    char, state = 'U', 'unapplied'
                else:
                    char, state = 'G', 'guarded'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%*d %s ' % (idxwidth, i, char)
                elif status and status != char:
                    continue
                displayname(pfx, patch, state)
        else:
            # walk the patch directory for files that are neither in the
            # series file nor queue metadata ("missing" patches)
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.status_path, self.series_path,
                                   self.guards_path)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and ('D ') or ''
                displayname(pfx, x, 'missing')
1480
1480
1481 def issaveline(self, l):
1481 def issaveline(self, l):
1482 if l.name == '.hg.patches.save.line':
1482 if l.name == '.hg.patches.save.line':
1483 return True
1483 return True
1484
1484
1485 def qrepo(self, create=False):
1485 def qrepo(self, create=False):
1486 if create or os.path.isdir(self.join(".hg")):
1486 if create or os.path.isdir(self.join(".hg")):
1487 return hg.repository(self.ui, path=self.path, create=create)
1487 return hg.repository(self.ui, path=self.path, create=create)
1488
1488
1489 def restore(self, repo, rev, delete=None, qupdate=None):
1489 def restore(self, repo, rev, delete=None, qupdate=None):
1490 desc = repo[rev].description().strip()
1490 desc = repo[rev].description().strip()
1491 lines = desc.splitlines()
1491 lines = desc.splitlines()
1492 i = 0
1492 i = 0
1493 datastart = None
1493 datastart = None
1494 series = []
1494 series = []
1495 applied = []
1495 applied = []
1496 qpp = None
1496 qpp = None
1497 for i, line in enumerate(lines):
1497 for i, line in enumerate(lines):
1498 if line == 'Patch Data:':
1498 if line == 'Patch Data:':
1499 datastart = i + 1
1499 datastart = i + 1
1500 elif line.startswith('Dirstate:'):
1500 elif line.startswith('Dirstate:'):
1501 l = line.rstrip()
1501 l = line.rstrip()
1502 l = l[10:].split(' ')
1502 l = l[10:].split(' ')
1503 qpp = [bin(x) for x in l]
1503 qpp = [bin(x) for x in l]
1504 elif datastart != None:
1504 elif datastart != None:
1505 l = line.rstrip()
1505 l = line.rstrip()
1506 n, name = l.split(':', 1)
1506 n, name = l.split(':', 1)
1507 if n:
1507 if n:
1508 applied.append(statusentry(bin(n), name))
1508 applied.append(statusentry(bin(n), name))
1509 else:
1509 else:
1510 series.append(l)
1510 series.append(l)
1511 if datastart is None:
1511 if datastart is None:
1512 self.ui.warn(_("No saved patch data found\n"))
1512 self.ui.warn(_("No saved patch data found\n"))
1513 return 1
1513 return 1
1514 self.ui.warn(_("restoring status: %s\n") % lines[0])
1514 self.ui.warn(_("restoring status: %s\n") % lines[0])
1515 self.full_series = series
1515 self.full_series = series
1516 self.applied = applied
1516 self.applied = applied
1517 self.parse_series()
1517 self.parse_series()
1518 self.series_dirty = 1
1518 self.series_dirty = 1
1519 self.applied_dirty = 1
1519 self.applied_dirty = 1
1520 heads = repo.changelog.heads()
1520 heads = repo.changelog.heads()
1521 if delete:
1521 if delete:
1522 if rev not in heads:
1522 if rev not in heads:
1523 self.ui.warn(_("save entry has children, leaving it alone\n"))
1523 self.ui.warn(_("save entry has children, leaving it alone\n"))
1524 else:
1524 else:
1525 self.ui.warn(_("removing save entry %s\n") % short(rev))
1525 self.ui.warn(_("removing save entry %s\n") % short(rev))
1526 pp = repo.dirstate.parents()
1526 pp = repo.dirstate.parents()
1527 if rev in pp:
1527 if rev in pp:
1528 update = True
1528 update = True
1529 else:
1529 else:
1530 update = False
1530 update = False
1531 self.strip(repo, rev, update=update, backup='strip')
1531 self.strip(repo, rev, update=update, backup='strip')
1532 if qpp:
1532 if qpp:
1533 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1533 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1534 (short(qpp[0]), short(qpp[1])))
1534 (short(qpp[0]), short(qpp[1])))
1535 if qupdate:
1535 if qupdate:
1536 self.ui.status(_("queue directory updating\n"))
1536 self.ui.status(_("queue directory updating\n"))
1537 r = self.qrepo()
1537 r = self.qrepo()
1538 if not r:
1538 if not r:
1539 self.ui.warn(_("Unable to load queue repository\n"))
1539 self.ui.warn(_("Unable to load queue repository\n"))
1540 return 1
1540 return 1
1541 hg.clean(r, qpp[0])
1541 hg.clean(r, qpp[0])
1542
1542
    def save(self, repo, msg=None):
        """Save the current queue state as a changeset (qsave).

        Encodes the applied list, the full series and (when the patch
        directory is itself a repository) its dirstate parents into a
        commit message, commits it, and records a sentinel entry in the
        applied list so the save state is recognizable.  Returns 1 on
        error.
        """
        if not self.applied:
            self.ui.warn(_("save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_("status is already saved\n"))
            return 1

        if not msg:
            msg = _("hg patches saved state")
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            # record the queue repository's dirstate parents so they can be
            # reinstated on restore
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        # applied entries serialize as "node:name", series entries as ":name"
        msg += ''.join('%s\n' % x for x in self.applied)
        msg += ''.join(':%s\n' % x for x in self.full_series)
        n = repo.commit(msg, force=True)
        if not n:
            self.ui.warn(_("repo commit failed\n"))
            return 1
        self.applied.append(statusentry(n, '.hg.patches.save.line'))
        self.applied_dirty = 1
        self.removeundo(repo)
1569
1569
1570 def full_series_end(self):
1570 def full_series_end(self):
1571 if self.applied:
1571 if self.applied:
1572 p = self.applied[-1].name
1572 p = self.applied[-1].name
1573 end = self.find_series(p)
1573 end = self.find_series(p)
1574 if end is None:
1574 if end is None:
1575 return len(self.full_series)
1575 return len(self.full_series)
1576 return end + 1
1576 return end + 1
1577 return 0
1577 return 0
1578
1578
1579 def series_end(self, all_patches=False):
1579 def series_end(self, all_patches=False):
1580 """If all_patches is False, return the index of the next pushable patch
1580 """If all_patches is False, return the index of the next pushable patch
1581 in the series, or the series length. If all_patches is True, return the
1581 in the series, or the series length. If all_patches is True, return the
1582 index of the first patch past the last applied one.
1582 index of the first patch past the last applied one.
1583 """
1583 """
1584 end = 0
1584 end = 0
1585 def next(start):
1585 def next(start):
1586 if all_patches or start >= len(self.series):
1586 if all_patches or start >= len(self.series):
1587 return start
1587 return start
1588 for i in xrange(start, len(self.series)):
1588 for i in xrange(start, len(self.series)):
1589 p, reason = self.pushable(i)
1589 p, reason = self.pushable(i)
1590 if p:
1590 if p:
1591 break
1591 break
1592 self.explain_pushable(i)
1592 self.explain_pushable(i)
1593 return i
1593 return i
1594 if self.applied:
1594 if self.applied:
1595 p = self.applied[-1].name
1595 p = self.applied[-1].name
1596 try:
1596 try:
1597 end = self.series.index(p)
1597 end = self.series.index(p)
1598 except ValueError:
1598 except ValueError:
1599 return 0
1599 return 0
1600 return next(end + 1)
1600 return next(end + 1)
1601 return next(end)
1601 return next(end)
1602
1602
1603 def appliedname(self, index):
1603 def appliedname(self, index):
1604 pname = self.applied[index].name
1604 pname = self.applied[index].name
1605 if not self.ui.verbose:
1605 if not self.ui.verbose:
1606 p = pname
1606 p = pname
1607 else:
1607 else:
1608 p = str(self.series.index(pname)) + " " + pname
1608 p = str(self.series.index(pname)) + " " + pname
1609 return p
1609 return p
1610
1610
1611 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1611 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1612 force=None, git=False):
1612 force=None, git=False):
1613 def checkseries(patchname):
1613 def checkseries(patchname):
1614 if patchname in self.series:
1614 if patchname in self.series:
1615 raise util.Abort(_('patch %s is already in the series file')
1615 raise util.Abort(_('patch %s is already in the series file')
1616 % patchname)
1616 % patchname)
1617 def checkfile(patchname):
1617 def checkfile(patchname):
1618 if not force and os.path.exists(self.join(patchname)):
1618 if not force and os.path.exists(self.join(patchname)):
1619 raise util.Abort(_('patch "%s" already exists')
1619 raise util.Abort(_('patch "%s" already exists')
1620 % patchname)
1620 % patchname)
1621
1621
1622 if rev:
1622 if rev:
1623 if files:
1623 if files:
1624 raise util.Abort(_('option "-r" not valid when importing '
1624 raise util.Abort(_('option "-r" not valid when importing '
1625 'files'))
1625 'files'))
1626 rev = cmdutil.revrange(repo, rev)
1626 rev = cmdutil.revrange(repo, rev)
1627 rev.sort(reverse=True)
1627 rev.sort(reverse=True)
1628 if (len(files) > 1 or len(rev) > 1) and patchname:
1628 if (len(files) > 1 or len(rev) > 1) and patchname:
1629 raise util.Abort(_('option "-n" not valid when importing multiple '
1629 raise util.Abort(_('option "-n" not valid when importing multiple '
1630 'patches'))
1630 'patches'))
1631 if rev:
1631 if rev:
1632 # If mq patches are applied, we can only import revisions
1632 # If mq patches are applied, we can only import revisions
1633 # that form a linear path to qbase.
1633 # that form a linear path to qbase.
1634 # Otherwise, they should form a linear path to a head.
1634 # Otherwise, they should form a linear path to a head.
1635 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1635 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1636 if len(heads) > 1:
1636 if len(heads) > 1:
1637 raise util.Abort(_('revision %d is the root of more than one '
1637 raise util.Abort(_('revision %d is the root of more than one '
1638 'branch') % rev[-1])
1638 'branch') % rev[-1])
1639 if self.applied:
1639 if self.applied:
1640 base = repo.changelog.node(rev[0])
1640 base = repo.changelog.node(rev[0])
1641 if base in [n.node for n in self.applied]:
1641 if base in [n.node for n in self.applied]:
1642 raise util.Abort(_('revision %d is already managed')
1642 raise util.Abort(_('revision %d is already managed')
1643 % rev[0])
1643 % rev[0])
1644 if heads != [self.applied[-1].node]:
1644 if heads != [self.applied[-1].node]:
1645 raise util.Abort(_('revision %d is not the parent of '
1645 raise util.Abort(_('revision %d is not the parent of '
1646 'the queue') % rev[0])
1646 'the queue') % rev[0])
1647 base = repo.changelog.rev(self.applied[0].node)
1647 base = repo.changelog.rev(self.applied[0].node)
1648 lastparent = repo.changelog.parentrevs(base)[0]
1648 lastparent = repo.changelog.parentrevs(base)[0]
1649 else:
1649 else:
1650 if heads != [repo.changelog.node(rev[0])]:
1650 if heads != [repo.changelog.node(rev[0])]:
1651 raise util.Abort(_('revision %d has unmanaged children')
1651 raise util.Abort(_('revision %d has unmanaged children')
1652 % rev[0])
1652 % rev[0])
1653 lastparent = None
1653 lastparent = None
1654
1654
1655 diffopts = self.diffopts({'git': git})
1655 diffopts = self.diffopts({'git': git})
1656 for r in rev:
1656 for r in rev:
1657 p1, p2 = repo.changelog.parentrevs(r)
1657 p1, p2 = repo.changelog.parentrevs(r)
1658 n = repo.changelog.node(r)
1658 n = repo.changelog.node(r)
1659 if p2 != nullrev:
1659 if p2 != nullrev:
1660 raise util.Abort(_('cannot import merge revision %d') % r)
1660 raise util.Abort(_('cannot import merge revision %d') % r)
1661 if lastparent and lastparent != r:
1661 if lastparent and lastparent != r:
1662 raise util.Abort(_('revision %d is not the parent of %d')
1662 raise util.Abort(_('revision %d is not the parent of %d')
1663 % (r, lastparent))
1663 % (r, lastparent))
1664 lastparent = p1
1664 lastparent = p1
1665
1665
1666 if not patchname:
1666 if not patchname:
1667 patchname = normname('%d.diff' % r)
1667 patchname = normname('%d.diff' % r)
1668 self.check_reserved_name(patchname)
1668 self.check_reserved_name(patchname)
1669 checkseries(patchname)
1669 checkseries(patchname)
1670 checkfile(patchname)
1670 checkfile(patchname)
1671 self.full_series.insert(0, patchname)
1671 self.full_series.insert(0, patchname)
1672
1672
1673 patchf = self.opener(patchname, "w")
1673 patchf = self.opener(patchname, "w")
1674 cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
1674 cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
1675 patchf.close()
1675 patchf.close()
1676
1676
1677 se = statusentry(n, patchname)
1677 se = statusentry(n, patchname)
1678 self.applied.insert(0, se)
1678 self.applied.insert(0, se)
1679
1679
1680 self.added.append(patchname)
1680 self.added.append(patchname)
1681 patchname = None
1681 patchname = None
1682 self.parse_series()
1682 self.parse_series()
1683 self.applied_dirty = 1
1683 self.applied_dirty = 1
1684 self.series_dirty = True
1684 self.series_dirty = True
1685
1685
1686 for i, filename in enumerate(files):
1686 for i, filename in enumerate(files):
1687 if existing:
1687 if existing:
1688 if filename == '-':
1688 if filename == '-':
1689 raise util.Abort(_('-e is incompatible with import from -'))
1689 raise util.Abort(_('-e is incompatible with import from -'))
1690 if not patchname:
1690 filename = normname(filename)
1691 patchname = normname(filename)
1691 self.check_reserved_name(filename)
1692 originpath = self.join(filename)
1693 if not os.path.isfile(originpath):
1694 raise util.Abort(_("patch %s does not exist") % filename)
1695
1696 if patchname:
1692 self.check_reserved_name(patchname)
1697 self.check_reserved_name(patchname)
1693 if not os.path.isfile(self.join(patchname)):
1698 checkfile(patchname)
1694 raise util.Abort(_("patch %s does not exist") % patchname)
1699
1700 self.ui.write(_('renaming %s to %s\n')
1701 % (filename, patchname))
1702 util.rename(originpath, self.join(patchname))
1703 else:
1704 patchname = filename
1705
1695 else:
1706 else:
1696 try:
1707 try:
1697 if filename == '-':
1708 if filename == '-':
1698 if not patchname:
1709 if not patchname:
1699 raise util.Abort(
1710 raise util.Abort(
1700 _('need --name to import a patch from -'))
1711 _('need --name to import a patch from -'))
1701 text = sys.stdin.read()
1712 text = sys.stdin.read()
1702 else:
1713 else:
1703 text = url.open(self.ui, filename).read()
1714 text = url.open(self.ui, filename).read()
1704 except (OSError, IOError):
1715 except (OSError, IOError):
1705 raise util.Abort(_("unable to read %s") % filename)
1716 raise util.Abort(_("unable to read %s") % filename)
1706 if not patchname:
1717 if not patchname:
1707 patchname = normname(os.path.basename(filename))
1718 patchname = normname(os.path.basename(filename))
1708 self.check_reserved_name(patchname)
1719 self.check_reserved_name(patchname)
1709 checkfile(patchname)
1720 checkfile(patchname)
1710 patchf = self.opener(patchname, "w")
1721 patchf = self.opener(patchname, "w")
1711 patchf.write(text)
1722 patchf.write(text)
1712 if not force:
1723 if not force:
1713 checkseries(patchname)
1724 checkseries(patchname)
1714 if patchname not in self.series:
1725 if patchname not in self.series:
1715 index = self.full_series_end() + i
1726 index = self.full_series_end() + i
1716 self.full_series[index:index] = [patchname]
1727 self.full_series[index:index] = [patchname]
1717 self.parse_series()
1728 self.parse_series()
1718 self.series_dirty = True
1729 self.series_dirty = True
1719 self.ui.warn(_("adding %s to series file\n") % patchname)
1730 self.ui.warn(_("adding %s to series file\n") % patchname)
1720 self.added.append(patchname)
1731 self.added.append(patchname)
1721 patchname = None
1732 patchname = None
1722
1733
1723 def delete(ui, repo, *patches, **opts):
1734 def delete(ui, repo, *patches, **opts):
1724 """remove patches from queue
1735 """remove patches from queue
1725
1736
1726 The patches must not be applied, and at least one patch is required. With
1737 The patches must not be applied, and at least one patch is required. With
1727 -k/--keep, the patch files are preserved in the patch directory.
1738 -k/--keep, the patch files are preserved in the patch directory.
1728
1739
1729 To stop managing a patch and move it into permanent history,
1740 To stop managing a patch and move it into permanent history,
1730 use the :hg:`qfinish` command."""
1741 use the :hg:`qfinish` command."""
1731 q = repo.mq
1742 q = repo.mq
1732 q.delete(repo, patches, opts)
1743 q.delete(repo, patches, opts)
1733 q.save_dirty()
1744 q.save_dirty()
1734 return 0
1745 return 0
1735
1746
1736 def applied(ui, repo, patch=None, **opts):
1747 def applied(ui, repo, patch=None, **opts):
1737 """print the patches already applied"""
1748 """print the patches already applied"""
1738
1749
1739 q = repo.mq
1750 q = repo.mq
1740 l = len(q.applied)
1751 l = len(q.applied)
1741
1752
1742 if patch:
1753 if patch:
1743 if patch not in q.series:
1754 if patch not in q.series:
1744 raise util.Abort(_("patch %s is not in series file") % patch)
1755 raise util.Abort(_("patch %s is not in series file") % patch)
1745 end = q.series.index(patch) + 1
1756 end = q.series.index(patch) + 1
1746 else:
1757 else:
1747 end = q.series_end(True)
1758 end = q.series_end(True)
1748
1759
1749 if opts.get('last') and not end:
1760 if opts.get('last') and not end:
1750 ui.write(_("no patches applied\n"))
1761 ui.write(_("no patches applied\n"))
1751 return 1
1762 return 1
1752 elif opts.get('last') and end == 1:
1763 elif opts.get('last') and end == 1:
1753 ui.write(_("only one patch applied\n"))
1764 ui.write(_("only one patch applied\n"))
1754 return 1
1765 return 1
1755 elif opts.get('last'):
1766 elif opts.get('last'):
1756 start = end - 2
1767 start = end - 2
1757 end = 1
1768 end = 1
1758 else:
1769 else:
1759 start = 0
1770 start = 0
1760
1771
1761 return q.qseries(repo, length=end, start=start, status='A',
1772 return q.qseries(repo, length=end, start=start, status='A',
1762 summary=opts.get('summary'))
1773 summary=opts.get('summary'))
1763
1774
1764 def unapplied(ui, repo, patch=None, **opts):
1775 def unapplied(ui, repo, patch=None, **opts):
1765 """print the patches not yet applied"""
1776 """print the patches not yet applied"""
1766
1777
1767 q = repo.mq
1778 q = repo.mq
1768 if patch:
1779 if patch:
1769 if patch not in q.series:
1780 if patch not in q.series:
1770 raise util.Abort(_("patch %s is not in series file") % patch)
1781 raise util.Abort(_("patch %s is not in series file") % patch)
1771 start = q.series.index(patch) + 1
1782 start = q.series.index(patch) + 1
1772 else:
1783 else:
1773 start = q.series_end(True)
1784 start = q.series_end(True)
1774
1785
1775 if start == len(q.series) and opts.get('first'):
1786 if start == len(q.series) and opts.get('first'):
1776 ui.write(_("all patches applied\n"))
1787 ui.write(_("all patches applied\n"))
1777 return 1
1788 return 1
1778
1789
1779 length = opts.get('first') and 1 or None
1790 length = opts.get('first') and 1 or None
1780 return q.qseries(repo, start=start, length=length, status='U',
1791 return q.qseries(repo, start=start, length=length, status='U',
1781 summary=opts.get('summary'))
1792 summary=opts.get('summary'))
1782
1793
1783 def qimport(ui, repo, *filename, **opts):
1794 def qimport(ui, repo, *filename, **opts):
1784 """import a patch
1795 """import a patch
1785
1796
1786 The patch is inserted into the series after the last applied
1797 The patch is inserted into the series after the last applied
1787 patch. If no patches have been applied, qimport prepends the patch
1798 patch. If no patches have been applied, qimport prepends the patch
1788 to the series.
1799 to the series.
1789
1800
1790 The patch will have the same name as its source file unless you
1801 The patch will have the same name as its source file unless you
1791 give it a new one with -n/--name.
1802 give it a new one with -n/--name.
1792
1803
1793 You can register an existing patch inside the patch directory with
1804 You can register an existing patch inside the patch directory with
1794 the -e/--existing flag.
1805 the -e/--existing flag.
1795
1806
1796 With -f/--force, an existing patch of the same name will be
1807 With -f/--force, an existing patch of the same name will be
1797 overwritten.
1808 overwritten.
1798
1809
1799 An existing changeset may be placed under mq control with -r/--rev
1810 An existing changeset may be placed under mq control with -r/--rev
1800 (e.g. qimport --rev tip -n patch will place tip under mq control).
1811 (e.g. qimport --rev tip -n patch will place tip under mq control).
1801 With -g/--git, patches imported with --rev will use the git diff
1812 With -g/--git, patches imported with --rev will use the git diff
1802 format. See the diffs help topic for information on why this is
1813 format. See the diffs help topic for information on why this is
1803 important for preserving rename/copy information and permission
1814 important for preserving rename/copy information and permission
1804 changes.
1815 changes.
1805
1816
1806 To import a patch from standard input, pass - as the patch file.
1817 To import a patch from standard input, pass - as the patch file.
1807 When importing from standard input, a patch name must be specified
1818 When importing from standard input, a patch name must be specified
1808 using the --name flag.
1819 using the --name flag.
1820
1821 To import an existing patch while renaming it::
1822
1823 hg qimport -e existing-patch -n new-name
1809 """
1824 """
1810 q = repo.mq
1825 q = repo.mq
1811 try:
1826 try:
1812 q.qimport(repo, filename, patchname=opts['name'],
1827 q.qimport(repo, filename, patchname=opts['name'],
1813 existing=opts['existing'], force=opts['force'], rev=opts['rev'],
1828 existing=opts['existing'], force=opts['force'], rev=opts['rev'],
1814 git=opts['git'])
1829 git=opts['git'])
1815 finally:
1830 finally:
1816 q.save_dirty()
1831 q.save_dirty()
1817
1832
1818 if opts.get('push') and not opts.get('rev'):
1833 if opts.get('push') and not opts.get('rev'):
1819 return q.push(repo, None)
1834 return q.push(repo, None)
1820 return 0
1835 return 0
1821
1836
1822 def qinit(ui, repo, create):
1837 def qinit(ui, repo, create):
1823 """initialize a new queue repository
1838 """initialize a new queue repository
1824
1839
1825 This command also creates a series file for ordering patches, and
1840 This command also creates a series file for ordering patches, and
1826 an mq-specific .hgignore file in the queue repository, to exclude
1841 an mq-specific .hgignore file in the queue repository, to exclude
1827 the status and guards files (these contain mostly transient state)."""
1842 the status and guards files (these contain mostly transient state)."""
1828 q = repo.mq
1843 q = repo.mq
1829 r = q.init(repo, create)
1844 r = q.init(repo, create)
1830 q.save_dirty()
1845 q.save_dirty()
1831 if r:
1846 if r:
1832 if not os.path.exists(r.wjoin('.hgignore')):
1847 if not os.path.exists(r.wjoin('.hgignore')):
1833 fp = r.wopener('.hgignore', 'w')
1848 fp = r.wopener('.hgignore', 'w')
1834 fp.write('^\\.hg\n')
1849 fp.write('^\\.hg\n')
1835 fp.write('^\\.mq\n')
1850 fp.write('^\\.mq\n')
1836 fp.write('syntax: glob\n')
1851 fp.write('syntax: glob\n')
1837 fp.write('status\n')
1852 fp.write('status\n')
1838 fp.write('guards\n')
1853 fp.write('guards\n')
1839 fp.close()
1854 fp.close()
1840 if not os.path.exists(r.wjoin('series')):
1855 if not os.path.exists(r.wjoin('series')):
1841 r.wopener('series', 'w').close()
1856 r.wopener('series', 'w').close()
1842 r[None].add(['.hgignore', 'series'])
1857 r[None].add(['.hgignore', 'series'])
1843 commands.add(ui, r)
1858 commands.add(ui, r)
1844 return 0
1859 return 0
1845
1860
1846 def init(ui, repo, **opts):
1861 def init(ui, repo, **opts):
1847 """init a new queue repository (DEPRECATED)
1862 """init a new queue repository (DEPRECATED)
1848
1863
1849 The queue repository is unversioned by default. If
1864 The queue repository is unversioned by default. If
1850 -c/--create-repo is specified, qinit will create a separate nested
1865 -c/--create-repo is specified, qinit will create a separate nested
1851 repository for patches (qinit -c may also be run later to convert
1866 repository for patches (qinit -c may also be run later to convert
1852 an unversioned patch repository into a versioned one). You can use
1867 an unversioned patch repository into a versioned one). You can use
1853 qcommit to commit changes to this queue repository.
1868 qcommit to commit changes to this queue repository.
1854
1869
1855 This command is deprecated. Without -c, it's implied by other relevant
1870 This command is deprecated. Without -c, it's implied by other relevant
1856 commands. With -c, use :hg:`init --mq` instead."""
1871 commands. With -c, use :hg:`init --mq` instead."""
1857 return qinit(ui, repo, create=opts['create_repo'])
1872 return qinit(ui, repo, create=opts['create_repo'])
1858
1873
1859 def clone(ui, source, dest=None, **opts):
1874 def clone(ui, source, dest=None, **opts):
1860 '''clone main and patch repository at same time
1875 '''clone main and patch repository at same time
1861
1876
1862 If source is local, destination will have no patches applied. If
1877 If source is local, destination will have no patches applied. If
1863 source is remote, this command can not check if patches are
1878 source is remote, this command can not check if patches are
1864 applied in source, so cannot guarantee that patches are not
1879 applied in source, so cannot guarantee that patches are not
1865 applied in destination. If you clone remote repository, be sure
1880 applied in destination. If you clone remote repository, be sure
1866 before that it has no patches applied.
1881 before that it has no patches applied.
1867
1882
1868 Source patch repository is looked for in <src>/.hg/patches by
1883 Source patch repository is looked for in <src>/.hg/patches by
1869 default. Use -p <url> to change.
1884 default. Use -p <url> to change.
1870
1885
1871 The patch directory must be a nested Mercurial repository, as
1886 The patch directory must be a nested Mercurial repository, as
1872 would be created by :hg:`init --mq`.
1887 would be created by :hg:`init --mq`.
1873 '''
1888 '''
1874 def patchdir(repo):
1889 def patchdir(repo):
1875 url = repo.url()
1890 url = repo.url()
1876 if url.endswith('/'):
1891 if url.endswith('/'):
1877 url = url[:-1]
1892 url = url[:-1]
1878 return url + '/.hg/patches'
1893 return url + '/.hg/patches'
1879 if dest is None:
1894 if dest is None:
1880 dest = hg.defaultdest(source)
1895 dest = hg.defaultdest(source)
1881 sr = hg.repository(hg.remoteui(ui, opts), ui.expandpath(source))
1896 sr = hg.repository(hg.remoteui(ui, opts), ui.expandpath(source))
1882 if opts['patches']:
1897 if opts['patches']:
1883 patchespath = ui.expandpath(opts['patches'])
1898 patchespath = ui.expandpath(opts['patches'])
1884 else:
1899 else:
1885 patchespath = patchdir(sr)
1900 patchespath = patchdir(sr)
1886 try:
1901 try:
1887 hg.repository(ui, patchespath)
1902 hg.repository(ui, patchespath)
1888 except error.RepoError:
1903 except error.RepoError:
1889 raise util.Abort(_('versioned patch repository not found'
1904 raise util.Abort(_('versioned patch repository not found'
1890 ' (see init --mq)'))
1905 ' (see init --mq)'))
1891 qbase, destrev = None, None
1906 qbase, destrev = None, None
1892 if sr.local():
1907 if sr.local():
1893 if sr.mq.applied:
1908 if sr.mq.applied:
1894 qbase = sr.mq.applied[0].node
1909 qbase = sr.mq.applied[0].node
1895 if not hg.islocal(dest):
1910 if not hg.islocal(dest):
1896 heads = set(sr.heads())
1911 heads = set(sr.heads())
1897 destrev = list(heads.difference(sr.heads(qbase)))
1912 destrev = list(heads.difference(sr.heads(qbase)))
1898 destrev.append(sr.changelog.parents(qbase)[0])
1913 destrev.append(sr.changelog.parents(qbase)[0])
1899 elif sr.capable('lookup'):
1914 elif sr.capable('lookup'):
1900 try:
1915 try:
1901 qbase = sr.lookup('qbase')
1916 qbase = sr.lookup('qbase')
1902 except error.RepoError:
1917 except error.RepoError:
1903 pass
1918 pass
1904 ui.note(_('cloning main repository\n'))
1919 ui.note(_('cloning main repository\n'))
1905 sr, dr = hg.clone(ui, sr.url(), dest,
1920 sr, dr = hg.clone(ui, sr.url(), dest,
1906 pull=opts['pull'],
1921 pull=opts['pull'],
1907 rev=destrev,
1922 rev=destrev,
1908 update=False,
1923 update=False,
1909 stream=opts['uncompressed'])
1924 stream=opts['uncompressed'])
1910 ui.note(_('cloning patch repository\n'))
1925 ui.note(_('cloning patch repository\n'))
1911 hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
1926 hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
1912 pull=opts['pull'], update=not opts['noupdate'],
1927 pull=opts['pull'], update=not opts['noupdate'],
1913 stream=opts['uncompressed'])
1928 stream=opts['uncompressed'])
1914 if dr.local():
1929 if dr.local():
1915 if qbase:
1930 if qbase:
1916 ui.note(_('stripping applied patches from destination '
1931 ui.note(_('stripping applied patches from destination '
1917 'repository\n'))
1932 'repository\n'))
1918 dr.mq.strip(dr, qbase, update=False, backup=None)
1933 dr.mq.strip(dr, qbase, update=False, backup=None)
1919 if not opts['noupdate']:
1934 if not opts['noupdate']:
1920 ui.note(_('updating destination repository\n'))
1935 ui.note(_('updating destination repository\n'))
1921 hg.update(dr, dr.changelog.tip())
1936 hg.update(dr, dr.changelog.tip())
1922
1937
1923 def commit(ui, repo, *pats, **opts):
1938 def commit(ui, repo, *pats, **opts):
1924 """commit changes in the queue repository (DEPRECATED)
1939 """commit changes in the queue repository (DEPRECATED)
1925
1940
1926 This command is deprecated; use :hg:`commit --mq` instead."""
1941 This command is deprecated; use :hg:`commit --mq` instead."""
1927 q = repo.mq
1942 q = repo.mq
1928 r = q.qrepo()
1943 r = q.qrepo()
1929 if not r:
1944 if not r:
1930 raise util.Abort('no queue repository')
1945 raise util.Abort('no queue repository')
1931 commands.commit(r.ui, r, *pats, **opts)
1946 commands.commit(r.ui, r, *pats, **opts)
1932
1947
1933 def series(ui, repo, **opts):
1948 def series(ui, repo, **opts):
1934 """print the entire series file"""
1949 """print the entire series file"""
1935 repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
1950 repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
1936 return 0
1951 return 0
1937
1952
1938 def top(ui, repo, **opts):
1953 def top(ui, repo, **opts):
1939 """print the name of the current patch"""
1954 """print the name of the current patch"""
1940 q = repo.mq
1955 q = repo.mq
1941 t = q.applied and q.series_end(True) or 0
1956 t = q.applied and q.series_end(True) or 0
1942 if t:
1957 if t:
1943 return q.qseries(repo, start=t - 1, length=1, status='A',
1958 return q.qseries(repo, start=t - 1, length=1, status='A',
1944 summary=opts.get('summary'))
1959 summary=opts.get('summary'))
1945 else:
1960 else:
1946 ui.write(_("no patches applied\n"))
1961 ui.write(_("no patches applied\n"))
1947 return 1
1962 return 1
1948
1963
1949 def next(ui, repo, **opts):
1964 def next(ui, repo, **opts):
1950 """print the name of the next patch"""
1965 """print the name of the next patch"""
1951 q = repo.mq
1966 q = repo.mq
1952 end = q.series_end()
1967 end = q.series_end()
1953 if end == len(q.series):
1968 if end == len(q.series):
1954 ui.write(_("all patches applied\n"))
1969 ui.write(_("all patches applied\n"))
1955 return 1
1970 return 1
1956 return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
1971 return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
1957
1972
1958 def prev(ui, repo, **opts):
1973 def prev(ui, repo, **opts):
1959 """print the name of the previous patch"""
1974 """print the name of the previous patch"""
1960 q = repo.mq
1975 q = repo.mq
1961 l = len(q.applied)
1976 l = len(q.applied)
1962 if l == 1:
1977 if l == 1:
1963 ui.write(_("only one patch applied\n"))
1978 ui.write(_("only one patch applied\n"))
1964 return 1
1979 return 1
1965 if not l:
1980 if not l:
1966 ui.write(_("no patches applied\n"))
1981 ui.write(_("no patches applied\n"))
1967 return 1
1982 return 1
1968 return q.qseries(repo, start=l - 2, length=1, status='A',
1983 return q.qseries(repo, start=l - 2, length=1, status='A',
1969 summary=opts.get('summary'))
1984 summary=opts.get('summary'))
1970
1985
1971 def setupheaderopts(ui, opts):
1986 def setupheaderopts(ui, opts):
1972 if not opts.get('user') and opts.get('currentuser'):
1987 if not opts.get('user') and opts.get('currentuser'):
1973 opts['user'] = ui.username()
1988 opts['user'] = ui.username()
1974 if not opts.get('date') and opts.get('currentdate'):
1989 if not opts.get('date') and opts.get('currentdate'):
1975 opts['date'] = "%d %d" % util.makedate()
1990 opts['date'] = "%d %d" % util.makedate()
1976
1991
1977 def new(ui, repo, patch, *args, **opts):
1992 def new(ui, repo, patch, *args, **opts):
1978 """create a new patch
1993 """create a new patch
1979
1994
1980 qnew creates a new patch on top of the currently-applied patch (if
1995 qnew creates a new patch on top of the currently-applied patch (if
1981 any). The patch will be initialized with any outstanding changes
1996 any). The patch will be initialized with any outstanding changes
1982 in the working directory. You may also use -I/--include,
1997 in the working directory. You may also use -I/--include,
1983 -X/--exclude, and/or a list of files after the patch name to add
1998 -X/--exclude, and/or a list of files after the patch name to add
1984 only changes to matching files to the new patch, leaving the rest
1999 only changes to matching files to the new patch, leaving the rest
1985 as uncommitted modifications.
2000 as uncommitted modifications.
1986
2001
1987 -u/--user and -d/--date can be used to set the (given) user and
2002 -u/--user and -d/--date can be used to set the (given) user and
1988 date, respectively. -U/--currentuser and -D/--currentdate set user
2003 date, respectively. -U/--currentuser and -D/--currentdate set user
1989 to current user and date to current date.
2004 to current user and date to current date.
1990
2005
1991 -e/--edit, -m/--message or -l/--logfile set the patch header as
2006 -e/--edit, -m/--message or -l/--logfile set the patch header as
1992 well as the commit message. If none is specified, the header is
2007 well as the commit message. If none is specified, the header is
1993 empty and the commit message is '[mq]: PATCH'.
2008 empty and the commit message is '[mq]: PATCH'.
1994
2009
1995 Use the -g/--git option to keep the patch in the git extended diff
2010 Use the -g/--git option to keep the patch in the git extended diff
1996 format. Read the diffs help topic for more information on why this
2011 format. Read the diffs help topic for more information on why this
1997 is important for preserving permission changes and copy/rename
2012 is important for preserving permission changes and copy/rename
1998 information.
2013 information.
1999 """
2014 """
2000 msg = cmdutil.logmessage(opts)
2015 msg = cmdutil.logmessage(opts)
2001 def getmsg():
2016 def getmsg():
2002 return ui.edit(msg, opts['user'] or ui.username())
2017 return ui.edit(msg, opts['user'] or ui.username())
2003 q = repo.mq
2018 q = repo.mq
2004 opts['msg'] = msg
2019 opts['msg'] = msg
2005 if opts.get('edit'):
2020 if opts.get('edit'):
2006 opts['msg'] = getmsg
2021 opts['msg'] = getmsg
2007 else:
2022 else:
2008 opts['msg'] = msg
2023 opts['msg'] = msg
2009 setupheaderopts(ui, opts)
2024 setupheaderopts(ui, opts)
2010 q.new(repo, patch, *args, **opts)
2025 q.new(repo, patch, *args, **opts)
2011 q.save_dirty()
2026 q.save_dirty()
2012 return 0
2027 return 0
2013
2028
2014 def refresh(ui, repo, *pats, **opts):
2029 def refresh(ui, repo, *pats, **opts):
2015 """update the current patch
2030 """update the current patch
2016
2031
2017 If any file patterns are provided, the refreshed patch will
2032 If any file patterns are provided, the refreshed patch will
2018 contain only the modifications that match those patterns; the
2033 contain only the modifications that match those patterns; the
2019 remaining modifications will remain in the working directory.
2034 remaining modifications will remain in the working directory.
2020
2035
2021 If -s/--short is specified, files currently included in the patch
2036 If -s/--short is specified, files currently included in the patch
2022 will be refreshed just like matched files and remain in the patch.
2037 will be refreshed just like matched files and remain in the patch.
2023
2038
2024 hg add/remove/copy/rename work as usual, though you might want to
2039 hg add/remove/copy/rename work as usual, though you might want to
2025 use git-style patches (-g/--git or [diff] git=1) to track copies
2040 use git-style patches (-g/--git or [diff] git=1) to track copies
2026 and renames. See the diffs help topic for more information on the
2041 and renames. See the diffs help topic for more information on the
2027 git diff format.
2042 git diff format.
2028 """
2043 """
2029 q = repo.mq
2044 q = repo.mq
2030 message = cmdutil.logmessage(opts)
2045 message = cmdutil.logmessage(opts)
2031 if opts['edit']:
2046 if opts['edit']:
2032 if not q.applied:
2047 if not q.applied:
2033 ui.write(_("no patches applied\n"))
2048 ui.write(_("no patches applied\n"))
2034 return 1
2049 return 1
2035 if message:
2050 if message:
2036 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2051 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2037 patch = q.applied[-1].name
2052 patch = q.applied[-1].name
2038 ph = patchheader(q.join(patch), q.plainmode)
2053 ph = patchheader(q.join(patch), q.plainmode)
2039 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
2054 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
2040 setupheaderopts(ui, opts)
2055 setupheaderopts(ui, opts)
2041 ret = q.refresh(repo, pats, msg=message, **opts)
2056 ret = q.refresh(repo, pats, msg=message, **opts)
2042 q.save_dirty()
2057 q.save_dirty()
2043 return ret
2058 return ret
2044
2059
2045 def diff(ui, repo, *pats, **opts):
2060 def diff(ui, repo, *pats, **opts):
2046 """diff of the current patch and subsequent modifications
2061 """diff of the current patch and subsequent modifications
2047
2062
2048 Shows a diff which includes the current patch as well as any
2063 Shows a diff which includes the current patch as well as any
2049 changes which have been made in the working directory since the
2064 changes which have been made in the working directory since the
2050 last refresh (thus showing what the current patch would become
2065 last refresh (thus showing what the current patch would become
2051 after a qrefresh).
2066 after a qrefresh).
2052
2067
2053 Use :hg:`diff` if you only want to see the changes made since the
2068 Use :hg:`diff` if you only want to see the changes made since the
2054 last qrefresh, or :hg:`export qtip` if you want to see changes
2069 last qrefresh, or :hg:`export qtip` if you want to see changes
2055 made by the current patch without including changes made since the
2070 made by the current patch without including changes made since the
2056 qrefresh.
2071 qrefresh.
2057 """
2072 """
2058 repo.mq.diff(repo, pats, opts)
2073 repo.mq.diff(repo, pats, opts)
2059 return 0
2074 return 0
2060
2075
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo)[0]:
        raise util.Abort(_('No patches applied'))
    q.check_localchanges(repo)

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # Fix: the warning previously lacked a trailing newline and
            # ran into the next line of output (ui.warn adds none).
            ui.warn(_('Skipping already folded patch %s\n') % p)
            # NOTE(review): despite the "Skipping" message, the duplicate
            # is still appended below -- confirm whether a 'continue' is
            # intended here before changing control flow.
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # Collect each folded patch's commit message so all of them
            # can be concatenated into the refreshed patch's header.
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # Start from the current (parent) patch header and append the
        # folded messages, separated by '* * *' lines.
        ph = patchheader(q.join(parent), q.plainmode)
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    diffopts = q.patchopts(q.diffopts(), *patches)
    q.refresh(repo, msg=message, git=diffopts.git)
    q.delete(repo, patches, opts)
    q.save_dirty()
2124
2139
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    target = q.lookup(patch)
    # Pop if the target is already applied (it sits at or below the
    # current top), push otherwise; both take the same arguments.
    move = q.pop if q.isapplied(target) else q.push
    ret = move(repo, target, force=opts['force'])
    q.save_dirty()
    return ret
2135
2150
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.
    NOTE: Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable
    '''
    def status(idx):
        # Print "<patch>: <guards>" for series entry idx, colouring
        # each guard label by its +/-/none polarity.
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: ' % ui.label(q.series[idx], 'qguard.patch'))
        for i, guard in enumerate(guards):
            if guard.startswith('+'):
                ui.write(guard, label='qguard.positive')
            elif guard.startswith('-'):
                ui.write(guard, label='qguard.negative')
            else:
                ui.write(guard, label='qguard.unguarded')
            if i != len(guards) - 1:
                # Single space between guards, none after the last one.
                ui.write(' ')
        ui.write('\n')
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        # -l/--list prints every series entry; it takes no other input.
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    if not args or args[0][0:1] in '-+':
        # No patch name given (first argument already looks like a
        # guard): default to the topmost applied patch.
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        # First argument is a patch name, not a guard: consume it.
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # Setting guards; -n/--none clears them via the empty args list.
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        # No guards given: just display the patch's current guards.
        status(q.series.index(q.lookup(patch)))
2191
2206
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch"""
    q = repo.mq

    # Without an explicit name, fall back to the topmost applied patch.
    if not patch:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return 1
        patch = 'qtip'
    name = q.lookup(patch)
    ph = patchheader(q.join(name), q.plainmode)

    ui.write('\n'.join(ph.message) + '\n')
2206
2221
def lastsavename(path):
    """Return (filename, index) of the highest-numbered save of path.

    Saved queues are written as "<path>.<N>" by savename(); this scans
    the containing directory for the largest N.  Returns (None, None)
    when no saved queue exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # Escape the base name and anchor the suffix: the previous pattern
    # used the raw base (regex metacharacters in a queue name could
    # break or mis-match) and an unanchored '.'-any-char match, so a
    # stray file like "<base>X12foo" was accepted as save number 12.
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
2223
2238
def savename(path):
    """Return the next free save name: one past the highest existing index."""
    last, index = lastsavename(path)
    if last is None:
        index = 0
    return "%s.%d" % (path, index + 1)
2230
2245
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    When -f/--force is applied, all local changes in patched files
    will be lost.
    """
    q = repo.mq
    mergeq = None

    if opts['merge']:
        # Merge against an explicitly named queue (-n), or fall back to
        # the most recently saved one.
        if opts['name']:
            newpath = repo.join(opts['name'])
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    return q.push(repo, patch, force=opts['force'], list=opts['list'],
                  mergeq=mergeq, all=opts.get('all'), move=opts.get('move'))
2253
2268
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    By default, pops off the top of the patch stack. If given a patch
    name, keeps popping off patches until the named patch is at the
    top of the stack.
    """
    name = opts['name']
    if name:
        # Operating on a named (non-active) queue: never update the
        # working directory.
        q = queue(ui, repo.join(""), repo.join(name))
        ui.warn(_('using patch queue: %s\n') % q.path)
    else:
        q = repo.mq
    ret = q.pop(repo, patch, force=opts['force'], update=not name,
                all=opts['all'])
    q.save_dirty()
    return ret
2272
2287
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    if not name:
        # Single-argument form: the sole argument is the new name and
        # the patch to rename is the current (topmost applied) one.
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # Renaming into an existing directory keeps the old basename.
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(
            _('A patch named %s already exists in the series file') % name)

    ui.note(_('renaming %s to %s\n') % (patch, name))
    i = q.find_series(patch)
    # Preserve any '#guard' annotations attached to the series entry.
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    # If the patch is applied, rewrite its status entry to the new name.
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r:
        # Mirror the rename in the versioned patch-queue repository so
        # its dirstate records a copy/rename rather than delete+add.
        wctx = r[None]
        wlock = r.wlock()
        try:
            if r.dirstate[patch] == 'a':
                # Old file was only added, never committed: forget it
                # and add the new name instead of recording a copy.
                r.dirstate.forget(patch)
                r.dirstate.add(name)
            else:
                if r.dirstate[name] == 'r':
                    # Destination was marked removed: resurrect it first
                    # so the copy below is legal.
                    wctx.undelete([name])
                wctx.copy(patch, name)
                wctx.remove([patch], False)
        finally:
            wlock.release()

    q.save_dirty()
2336
2351
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use rebase --mq instead."""
    q = repo.mq
    # Resolve the user-supplied revision to a changelog node first.
    node = repo.lookup(rev)
    q.restore(repo, node, delete=opts['delete'], qupdate=opts['update'])
    q.save_dirty()
    return 0
2347
2362
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use rebase --mq instead."""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        # -c/--copy: duplicate the whole patch directory, either to an
        # explicitly named destination or to the next free save name.
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # Fix: was a bare 'except:' which also swallowed
            # KeyboardInterrupt/SystemExit; only a missing or
            # unremovable status file is expected and ignorable here.
            pass
    return 0
2379
2394
def strip(ui, repo, rev, **opts):
    """strip a changeset and all its descendants from the repository

    The strip command removes all changesets whose local revision
    number is greater than or equal to REV, and then restores any
    changesets that are not descendants of REV. If the working
    directory has uncommitted changes, the operation is aborted unless
    the --force flag is supplied.

    If a parent of the working directory is stripped, then the working
    directory will automatically be updated to the most recent
    available ancestor of the stripped parent after the operation
    completes.

    Any stripped changesets are stored in ``.hg/strip-backup`` as a
    bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
    be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
    where BUNDLE is the bundle file created by the strip. Note that
    the local revision numbers will in general be different after the
    restore.

    Use the --nobackup option to discard the backup bundle once the
    operation completes.
    """
    # --backup keeps only changes outside the stripped set; --nobackup
    # keeps nothing; the default backs up everything stripped.
    backup = 'all'
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'

    rev = repo.lookup(rev)
    p = repo.dirstate.parents()
    cl = repo.changelog
    # Update the working directory afterwards only when a dirstate
    # parent is actually being stripped, i.e. rev is an ancestor of
    # (or equal to) one of the working-directory parents.
    update = True
    if p[0] == nullid:
        update = False
    elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
        update = False
    elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
        update = False

    q = repo.mq
    if q.applied:
        # If applied mq patches are among the stripped changesets
        # (rev is an ancestor of qtip), drop them from the applied
        # list so mq's bookkeeping stays consistent with the repo.
        if rev == cl.ancestor(repo.lookup('qtip'), rev):
            q.applied_dirty = True
            start = 0
            end = len(q.applied)
            applied_list = [i.node for i in q.applied]
            if rev in applied_list:
                start = applied_list.index(rev)
            del q.applied[start:end]
            q.save_dirty()

    repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
    return 0
2435
2450
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -stable (negative guard)
        qguard bar.patch +stable (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.'''

    q = repo.mq
    guards = q.active()
    if args or opts['none']:
        # Changing the active selection: snapshot what was unapplied
        # and guarded beforehand so we can report how the counts moved.
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts['pop'] and not opts['reapply']:
            # Only report count changes when we are not about to
            # pop/reapply (which would make the report stale anyway).
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts['series']:
        # -s/--series: print a histogram of guards used in the series
        # file; with -v, also count the unguarded patches as 'NONE'.
        guards = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        # Sort by guard name, ignoring the leading +/- character.
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # No arguments: print the currently active guards.
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # Remember the current top patch before popping so --reapply can
    # push back to it afterwards.
    reapply = opts['reapply'] and q.applied and q.appliedname(-1)
    popped = False
    if opts['pop'] or opts['reapply']:
        # Pop down to just below the first applied patch that is now
        # guarded under the new selection.
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i - 1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            # Persist queue state even if the push above fails midway.
            q.save_dirty()
2538
2553
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.
    """
    applied = opts['applied']
    if not applied and not revrange:
        raise util.Abort(_('no revisions specified'))
    if applied:
        # --applied: finish the whole applied stack, qbase through qtip.
        revrange = ('qbase:qtip',) + revrange

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    q.finish(repo, cmdutil.revrange(repo, revrange))
    q.save_dirty()
    return 0
2569
2584
2570 def qqueue(ui, repo, name=None, **opts):
2585 def qqueue(ui, repo, name=None, **opts):
2571 '''manage multiple patch queues
2586 '''manage multiple patch queues
2572
2587
2573 Supports switching between different patch queues, as well as creating
2588 Supports switching between different patch queues, as well as creating
2574 new patch queues and deleting existing ones.
2589 new patch queues and deleting existing ones.
2575
2590
2576 Omitting a queue name or specifying -l/--list will show you the registered
2591 Omitting a queue name or specifying -l/--list will show you the registered
2577 queues - by default the "normal" patches queue is registered. The currently
2592 queues - by default the "normal" patches queue is registered. The currently
2578 active queue will be marked with "(active)".
2593 active queue will be marked with "(active)".
2579
2594
2580 To create a new queue, use -c/--create. The queue is automatically made
2595 To create a new queue, use -c/--create. The queue is automatically made
2581 active, except in the case where there are applied patches from the
2596 active, except in the case where there are applied patches from the
2582 currently active queue in the repository. Then the queue will only be
2597 currently active queue in the repository. Then the queue will only be
2583 created and switching will fail.
2598 created and switching will fail.
2584
2599
2585 To delete an existing queue, use --delete. You cannot delete the currently
2600 To delete an existing queue, use --delete. You cannot delete the currently
2586 active queue.
2601 active queue.
2587 '''
2602 '''
2588
2603
2589 q = repo.mq
2604 q = repo.mq
2590
2605
2591 _defaultqueue = 'patches'
2606 _defaultqueue = 'patches'
2592 _allqueues = 'patches.queues'
2607 _allqueues = 'patches.queues'
2593 _activequeue = 'patches.queue'
2608 _activequeue = 'patches.queue'
2594
2609
2595 def _getcurrent():
2610 def _getcurrent():
2596 cur = os.path.basename(q.path)
2611 cur = os.path.basename(q.path)
2597 if cur.startswith('patches-'):
2612 if cur.startswith('patches-'):
2598 cur = cur[8:]
2613 cur = cur[8:]
2599 return cur
2614 return cur
2600
2615
2601 def _noqueues():
2616 def _noqueues():
2602 try:
2617 try:
2603 fh = repo.opener(_allqueues, 'r')
2618 fh = repo.opener(_allqueues, 'r')
2604 fh.close()
2619 fh.close()
2605 except IOError:
2620 except IOError:
2606 return True
2621 return True
2607
2622
2608 return False
2623 return False
2609
2624
2610 def _getqueues():
2625 def _getqueues():
2611 current = _getcurrent()
2626 current = _getcurrent()
2612
2627
2613 try:
2628 try:
2614 fh = repo.opener(_allqueues, 'r')
2629 fh = repo.opener(_allqueues, 'r')
2615 queues = [queue.strip() for queue in fh if queue.strip()]
2630 queues = [queue.strip() for queue in fh if queue.strip()]
2616 if current not in queues:
2631 if current not in queues:
2617 queues.append(current)
2632 queues.append(current)
2618 except IOError:
2633 except IOError:
2619 queues = [_defaultqueue]
2634 queues = [_defaultqueue]
2620
2635
2621 return sorted(queues)
2636 return sorted(queues)
2622
2637
2623 def _setactive(name):
2638 def _setactive(name):
2624 if q.applied:
2639 if q.applied:
2625 raise util.Abort(_('patches applied - cannot set new queue active'))
2640 raise util.Abort(_('patches applied - cannot set new queue active'))
2626
2641
2627 fh = repo.opener(_activequeue, 'w')
2642 fh = repo.opener(_activequeue, 'w')
2628 if name != 'patches':
2643 if name != 'patches':
2629 fh.write(name)
2644 fh.write(name)
2630 fh.close()
2645 fh.close()
2631
2646
2632 def _addqueue(name):
2647 def _addqueue(name):
2633 fh = repo.opener(_allqueues, 'a')
2648 fh = repo.opener(_allqueues, 'a')
2634 fh.write('%s\n' % (name,))
2649 fh.write('%s\n' % (name,))
2635 fh.close()
2650 fh.close()
2636
2651
2637 def _validname(name):
2652 def _validname(name):
2638 for n in name:
2653 for n in name:
2639 if n in ':\\/.':
2654 if n in ':\\/.':
2640 return False
2655 return False
2641 return True
2656 return True
2642
2657
2643 if not name or opts.get('list'):
2658 if not name or opts.get('list'):
2644 current = _getcurrent()
2659 current = _getcurrent()
2645 for queue in _getqueues():
2660 for queue in _getqueues():
2646 ui.write('%s' % (queue,))
2661 ui.write('%s' % (queue,))
2647 if queue == current:
2662 if queue == current:
2648 ui.write(_(' (active)\n'))
2663 ui.write(_(' (active)\n'))
2649 else:
2664 else:
2650 ui.write('\n')
2665 ui.write('\n')
2651 return
2666 return
2652
2667
2653 if not _validname(name):
2668 if not _validname(name):
2654 raise util.Abort(
2669 raise util.Abort(
2655 _('invalid queue name, may not contain the characters ":\\/."'))
2670 _('invalid queue name, may not contain the characters ":\\/."'))
2656
2671
2657 existing = _getqueues()
2672 existing = _getqueues()
2658
2673
2659 if opts.get('create'):
2674 if opts.get('create'):
2660 if name in existing:
2675 if name in existing:
2661 raise util.Abort(_('queue "%s" already exists') % name)
2676 raise util.Abort(_('queue "%s" already exists') % name)
2662 if _noqueues():
2677 if _noqueues():
2663 _addqueue(_defaultqueue)
2678 _addqueue(_defaultqueue)
2664 _addqueue(name)
2679 _addqueue(name)
2665 _setactive(name)
2680 _setactive(name)
2666 elif opts.get('delete'):
2681 elif opts.get('delete'):
2667 if name not in existing:
2682 if name not in existing:
2668 raise util.Abort(_('cannot delete queue that does not exist'))
2683 raise util.Abort(_('cannot delete queue that does not exist'))
2669
2684
2670 current = _getcurrent()
2685 current = _getcurrent()
2671
2686
2672 if name == current:
2687 if name == current:
2673 raise util.Abort(_('cannot delete currently active queue'))
2688 raise util.Abort(_('cannot delete currently active queue'))
2674
2689
2675 fh = repo.opener('patches.queues.new', 'w')
2690 fh = repo.opener('patches.queues.new', 'w')
2676 for queue in existing:
2691 for queue in existing:
2677 if queue == name:
2692 if queue == name:
2678 continue
2693 continue
2679 fh.write('%s\n' % (queue,))
2694 fh.write('%s\n' % (queue,))
2680 fh.close()
2695 fh.close()
2681 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
2696 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
2682 else:
2697 else:
2683 if name not in existing:
2698 if name not in existing:
2684 raise util.Abort(_('use --create to create a new queue'))
2699 raise util.Abort(_('use --create to create a new queue'))
2685 _setactive(name)
2700 _setactive(name)
2686
2701
2687 def reposetup(ui, repo):
2702 def reposetup(ui, repo):
2688 class mqrepo(repo.__class__):
2703 class mqrepo(repo.__class__):
2689 @util.propertycache
2704 @util.propertycache
2690 def mq(self):
2705 def mq(self):
2691 return queue(self.ui, self.join(""))
2706 return queue(self.ui, self.join(""))
2692
2707
2693 def abort_if_wdir_patched(self, errmsg, force=False):
2708 def abort_if_wdir_patched(self, errmsg, force=False):
2694 if self.mq.applied and not force:
2709 if self.mq.applied and not force:
2695 parent = self.dirstate.parents()[0]
2710 parent = self.dirstate.parents()[0]
2696 if parent in [s.node for s in self.mq.applied]:
2711 if parent in [s.node for s in self.mq.applied]:
2697 raise util.Abort(errmsg)
2712 raise util.Abort(errmsg)
2698
2713
2699 def commit(self, text="", user=None, date=None, match=None,
2714 def commit(self, text="", user=None, date=None, match=None,
2700 force=False, editor=False, extra={}):
2715 force=False, editor=False, extra={}):
2701 self.abort_if_wdir_patched(
2716 self.abort_if_wdir_patched(
2702 _('cannot commit over an applied mq patch'),
2717 _('cannot commit over an applied mq patch'),
2703 force)
2718 force)
2704
2719
2705 return super(mqrepo, self).commit(text, user, date, match, force,
2720 return super(mqrepo, self).commit(text, user, date, match, force,
2706 editor, extra)
2721 editor, extra)
2707
2722
2708 def push(self, remote, force=False, revs=None, newbranch=False):
2723 def push(self, remote, force=False, revs=None, newbranch=False):
2709 if self.mq.applied and not force and not revs:
2724 if self.mq.applied and not force and not revs:
2710 raise util.Abort(_('source has mq patches applied'))
2725 raise util.Abort(_('source has mq patches applied'))
2711 return super(mqrepo, self).push(remote, force, revs, newbranch)
2726 return super(mqrepo, self).push(remote, force, revs, newbranch)
2712
2727
2713 def _findtags(self):
2728 def _findtags(self):
2714 '''augment tags from base class with patch tags'''
2729 '''augment tags from base class with patch tags'''
2715 result = super(mqrepo, self)._findtags()
2730 result = super(mqrepo, self)._findtags()
2716
2731
2717 q = self.mq
2732 q = self.mq
2718 if not q.applied:
2733 if not q.applied:
2719 return result
2734 return result
2720
2735
2721 mqtags = [(patch.node, patch.name) for patch in q.applied]
2736 mqtags = [(patch.node, patch.name) for patch in q.applied]
2722
2737
2723 if mqtags[-1][0] not in self.changelog.nodemap:
2738 if mqtags[-1][0] not in self.changelog.nodemap:
2724 self.ui.warn(_('mq status file refers to unknown node %s\n')
2739 self.ui.warn(_('mq status file refers to unknown node %s\n')
2725 % short(mqtags[-1][0]))
2740 % short(mqtags[-1][0]))
2726 return result
2741 return result
2727
2742
2728 mqtags.append((mqtags[-1][0], 'qtip'))
2743 mqtags.append((mqtags[-1][0], 'qtip'))
2729 mqtags.append((mqtags[0][0], 'qbase'))
2744 mqtags.append((mqtags[0][0], 'qbase'))
2730 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2745 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2731 tags = result[0]
2746 tags = result[0]
2732 for patch in mqtags:
2747 for patch in mqtags:
2733 if patch[1] in tags:
2748 if patch[1] in tags:
2734 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2749 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2735 % patch[1])
2750 % patch[1])
2736 else:
2751 else:
2737 tags[patch[1]] = patch[0]
2752 tags[patch[1]] = patch[0]
2738
2753
2739 return result
2754 return result
2740
2755
2741 def _branchtags(self, partial, lrev):
2756 def _branchtags(self, partial, lrev):
2742 q = self.mq
2757 q = self.mq
2743 if not q.applied:
2758 if not q.applied:
2744 return super(mqrepo, self)._branchtags(partial, lrev)
2759 return super(mqrepo, self)._branchtags(partial, lrev)
2745
2760
2746 cl = self.changelog
2761 cl = self.changelog
2747 qbasenode = q.applied[0].node
2762 qbasenode = q.applied[0].node
2748 if qbasenode not in cl.nodemap:
2763 if qbasenode not in cl.nodemap:
2749 self.ui.warn(_('mq status file refers to unknown node %s\n')
2764 self.ui.warn(_('mq status file refers to unknown node %s\n')
2750 % short(qbasenode))
2765 % short(qbasenode))
2751 return super(mqrepo, self)._branchtags(partial, lrev)
2766 return super(mqrepo, self)._branchtags(partial, lrev)
2752
2767
2753 qbase = cl.rev(qbasenode)
2768 qbase = cl.rev(qbasenode)
2754 start = lrev + 1
2769 start = lrev + 1
2755 if start < qbase:
2770 if start < qbase:
2756 # update the cache (excluding the patches) and save it
2771 # update the cache (excluding the patches) and save it
2757 ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
2772 ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
2758 self._updatebranchcache(partial, ctxgen)
2773 self._updatebranchcache(partial, ctxgen)
2759 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
2774 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
2760 start = qbase
2775 start = qbase
2761 # if start = qbase, the cache is as updated as it should be.
2776 # if start = qbase, the cache is as updated as it should be.
2762 # if start > qbase, the cache includes (part of) the patches.
2777 # if start > qbase, the cache includes (part of) the patches.
2763 # we might as well use it, but we won't save it.
2778 # we might as well use it, but we won't save it.
2764
2779
2765 # update the cache up to the tip
2780 # update the cache up to the tip
2766 ctxgen = (self[r] for r in xrange(start, len(cl)))
2781 ctxgen = (self[r] for r in xrange(start, len(cl)))
2767 self._updatebranchcache(partial, ctxgen)
2782 self._updatebranchcache(partial, ctxgen)
2768
2783
2769 return partial
2784 return partial
2770
2785
2771 if repo.local():
2786 if repo.local():
2772 repo.__class__ = mqrepo
2787 repo.__class__ = mqrepo
2773
2788
2774 def mqimport(orig, ui, repo, *args, **kwargs):
2789 def mqimport(orig, ui, repo, *args, **kwargs):
2775 if (hasattr(repo, 'abort_if_wdir_patched')
2790 if (hasattr(repo, 'abort_if_wdir_patched')
2776 and not kwargs.get('no_commit', False)):
2791 and not kwargs.get('no_commit', False)):
2777 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2792 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2778 kwargs.get('force'))
2793 kwargs.get('force'))
2779 return orig(ui, repo, *args, **kwargs)
2794 return orig(ui, repo, *args, **kwargs)
2780
2795
2781 def mqinit(orig, ui, *args, **kwargs):
2796 def mqinit(orig, ui, *args, **kwargs):
2782 mq = kwargs.pop('mq', None)
2797 mq = kwargs.pop('mq', None)
2783
2798
2784 if not mq:
2799 if not mq:
2785 return orig(ui, *args, **kwargs)
2800 return orig(ui, *args, **kwargs)
2786
2801
2787 if args:
2802 if args:
2788 repopath = args[0]
2803 repopath = args[0]
2789 if not hg.islocal(repopath):
2804 if not hg.islocal(repopath):
2790 raise util.Abort(_('only a local queue repository '
2805 raise util.Abort(_('only a local queue repository '
2791 'may be initialized'))
2806 'may be initialized'))
2792 else:
2807 else:
2793 repopath = cmdutil.findrepo(os.getcwd())
2808 repopath = cmdutil.findrepo(os.getcwd())
2794 if not repopath:
2809 if not repopath:
2795 raise util.Abort(_('There is no Mercurial repository here '
2810 raise util.Abort(_('There is no Mercurial repository here '
2796 '(.hg not found)'))
2811 '(.hg not found)'))
2797 repo = hg.repository(ui, repopath)
2812 repo = hg.repository(ui, repopath)
2798 return qinit(ui, repo, True)
2813 return qinit(ui, repo, True)
2799
2814
2800 def mqcommand(orig, ui, repo, *args, **kwargs):
2815 def mqcommand(orig, ui, repo, *args, **kwargs):
2801 """Add --mq option to operate on patch repository instead of main"""
2816 """Add --mq option to operate on patch repository instead of main"""
2802
2817
2803 # some commands do not like getting unknown options
2818 # some commands do not like getting unknown options
2804 mq = kwargs.pop('mq', None)
2819 mq = kwargs.pop('mq', None)
2805
2820
2806 if not mq:
2821 if not mq:
2807 return orig(ui, repo, *args, **kwargs)
2822 return orig(ui, repo, *args, **kwargs)
2808
2823
2809 q = repo.mq
2824 q = repo.mq
2810 r = q.qrepo()
2825 r = q.qrepo()
2811 if not r:
2826 if not r:
2812 raise util.Abort(_('no queue repository'))
2827 raise util.Abort(_('no queue repository'))
2813 return orig(r.ui, r, *args, **kwargs)
2828 return orig(r.ui, r, *args, **kwargs)
2814
2829
2815 def summary(orig, ui, repo, *args, **kwargs):
2830 def summary(orig, ui, repo, *args, **kwargs):
2816 r = orig(ui, repo, *args, **kwargs)
2831 r = orig(ui, repo, *args, **kwargs)
2817 q = repo.mq
2832 q = repo.mq
2818 m = []
2833 m = []
2819 a, u = len(q.applied), len(q.unapplied(repo))
2834 a, u = len(q.applied), len(q.unapplied(repo))
2820 if a:
2835 if a:
2821 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
2836 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
2822 if u:
2837 if u:
2823 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
2838 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
2824 if m:
2839 if m:
2825 ui.write("mq: %s\n" % ', '.join(m))
2840 ui.write("mq: %s\n" % ', '.join(m))
2826 else:
2841 else:
2827 ui.note(_("mq: (empty queue)\n"))
2842 ui.note(_("mq: (empty queue)\n"))
2828 return r
2843 return r
2829
2844
2830 def uisetup(ui):
2845 def uisetup(ui):
2831 mqopt = [('', 'mq', None, _("operate on patch repository"))]
2846 mqopt = [('', 'mq', None, _("operate on patch repository"))]
2832
2847
2833 extensions.wrapcommand(commands.table, 'import', mqimport)
2848 extensions.wrapcommand(commands.table, 'import', mqimport)
2834 extensions.wrapcommand(commands.table, 'summary', summary)
2849 extensions.wrapcommand(commands.table, 'summary', summary)
2835
2850
2836 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
2851 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
2837 entry[1].extend(mqopt)
2852 entry[1].extend(mqopt)
2838
2853
2839 norepo = commands.norepo.split(" ")
2854 norepo = commands.norepo.split(" ")
2840 for cmd in commands.table.keys():
2855 for cmd in commands.table.keys():
2841 cmd = cmdutil.parsealiases(cmd)[0]
2856 cmd = cmdutil.parsealiases(cmd)[0]
2842 if cmd in norepo:
2857 if cmd in norepo:
2843 continue
2858 continue
2844 entry = extensions.wrapcommand(commands.table, cmd, mqcommand)
2859 entry = extensions.wrapcommand(commands.table, cmd, mqcommand)
2845 entry[1].extend(mqopt)
2860 entry[1].extend(mqopt)
2846
2861
2847 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2862 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2848
2863
2849 cmdtable = {
2864 cmdtable = {
2850 "qapplied":
2865 "qapplied":
2851 (applied,
2866 (applied,
2852 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
2867 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
2853 _('hg qapplied [-1] [-s] [PATCH]')),
2868 _('hg qapplied [-1] [-s] [PATCH]')),
2854 "qclone":
2869 "qclone":
2855 (clone,
2870 (clone,
2856 [('', 'pull', None, _('use pull protocol to copy metadata')),
2871 [('', 'pull', None, _('use pull protocol to copy metadata')),
2857 ('U', 'noupdate', None, _('do not update the new working directories')),
2872 ('U', 'noupdate', None, _('do not update the new working directories')),
2858 ('', 'uncompressed', None,
2873 ('', 'uncompressed', None,
2859 _('use uncompressed transfer (fast over LAN)')),
2874 _('use uncompressed transfer (fast over LAN)')),
2860 ('p', 'patches', '',
2875 ('p', 'patches', '',
2861 _('location of source patch repository'), _('REPO')),
2876 _('location of source patch repository'), _('REPO')),
2862 ] + commands.remoteopts,
2877 ] + commands.remoteopts,
2863 _('hg qclone [OPTION]... SOURCE [DEST]')),
2878 _('hg qclone [OPTION]... SOURCE [DEST]')),
2864 "qcommit|qci":
2879 "qcommit|qci":
2865 (commit,
2880 (commit,
2866 commands.table["^commit|ci"][1],
2881 commands.table["^commit|ci"][1],
2867 _('hg qcommit [OPTION]... [FILE]...')),
2882 _('hg qcommit [OPTION]... [FILE]...')),
2868 "^qdiff":
2883 "^qdiff":
2869 (diff,
2884 (diff,
2870 commands.diffopts + commands.diffopts2 + commands.walkopts,
2885 commands.diffopts + commands.diffopts2 + commands.walkopts,
2871 _('hg qdiff [OPTION]... [FILE]...')),
2886 _('hg qdiff [OPTION]... [FILE]...')),
2872 "qdelete|qremove|qrm":
2887 "qdelete|qremove|qrm":
2873 (delete,
2888 (delete,
2874 [('k', 'keep', None, _('keep patch file')),
2889 [('k', 'keep', None, _('keep patch file')),
2875 ('r', 'rev', [],
2890 ('r', 'rev', [],
2876 _('stop managing a revision (DEPRECATED)'), _('REV'))],
2891 _('stop managing a revision (DEPRECATED)'), _('REV'))],
2877 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2892 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2878 'qfold':
2893 'qfold':
2879 (fold,
2894 (fold,
2880 [('e', 'edit', None, _('edit patch header')),
2895 [('e', 'edit', None, _('edit patch header')),
2881 ('k', 'keep', None, _('keep folded patch files')),
2896 ('k', 'keep', None, _('keep folded patch files')),
2882 ] + commands.commitopts,
2897 ] + commands.commitopts,
2883 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2898 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2884 'qgoto':
2899 'qgoto':
2885 (goto,
2900 (goto,
2886 [('f', 'force', None, _('overwrite any local changes'))],
2901 [('f', 'force', None, _('overwrite any local changes'))],
2887 _('hg qgoto [OPTION]... PATCH')),
2902 _('hg qgoto [OPTION]... PATCH')),
2888 'qguard':
2903 'qguard':
2889 (guard,
2904 (guard,
2890 [('l', 'list', None, _('list all patches and guards')),
2905 [('l', 'list', None, _('list all patches and guards')),
2891 ('n', 'none', None, _('drop all guards'))],
2906 ('n', 'none', None, _('drop all guards'))],
2892 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
2907 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
2893 'qheader': (header, [], _('hg qheader [PATCH]')),
2908 'qheader': (header, [], _('hg qheader [PATCH]')),
2894 "qimport":
2909 "qimport":
2895 (qimport,
2910 (qimport,
2896 [('e', 'existing', None, _('import file in patch directory')),
2911 [('e', 'existing', None, _('import file in patch directory')),
2897 ('n', 'name', '',
2912 ('n', 'name', '',
2898 _('name of patch file'), _('NAME')),
2913 _('name of patch file'), _('NAME')),
2899 ('f', 'force', None, _('overwrite existing files')),
2914 ('f', 'force', None, _('overwrite existing files')),
2900 ('r', 'rev', [],
2915 ('r', 'rev', [],
2901 _('place existing revisions under mq control'), _('REV')),
2916 _('place existing revisions under mq control'), _('REV')),
2902 ('g', 'git', None, _('use git extended diff format')),
2917 ('g', 'git', None, _('use git extended diff format')),
2903 ('P', 'push', None, _('qpush after importing'))],
2918 ('P', 'push', None, _('qpush after importing'))],
2904 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2919 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2905 "^qinit":
2920 "^qinit":
2906 (init,
2921 (init,
2907 [('c', 'create-repo', None, _('create queue repository'))],
2922 [('c', 'create-repo', None, _('create queue repository'))],
2908 _('hg qinit [-c]')),
2923 _('hg qinit [-c]')),
2909 "^qnew":
2924 "^qnew":
2910 (new,
2925 (new,
2911 [('e', 'edit', None, _('edit commit message')),
2926 [('e', 'edit', None, _('edit commit message')),
2912 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
2927 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
2913 ('g', 'git', None, _('use git extended diff format')),
2928 ('g', 'git', None, _('use git extended diff format')),
2914 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2929 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2915 ('u', 'user', '',
2930 ('u', 'user', '',
2916 _('add "From: <USER>" to patch'), _('USER')),
2931 _('add "From: <USER>" to patch'), _('USER')),
2917 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2932 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2918 ('d', 'date', '',
2933 ('d', 'date', '',
2919 _('add "Date: <DATE>" to patch'), _('DATE'))
2934 _('add "Date: <DATE>" to patch'), _('DATE'))
2920 ] + commands.walkopts + commands.commitopts,
2935 ] + commands.walkopts + commands.commitopts,
2921 _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...')),
2936 _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...')),
2922 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2937 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2923 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2938 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2924 "^qpop":
2939 "^qpop":
2925 (pop,
2940 (pop,
2926 [('a', 'all', None, _('pop all patches')),
2941 [('a', 'all', None, _('pop all patches')),
2927 ('n', 'name', '',
2942 ('n', 'name', '',
2928 _('queue name to pop (DEPRECATED)'), _('NAME')),
2943 _('queue name to pop (DEPRECATED)'), _('NAME')),
2929 ('f', 'force', None, _('forget any local changes to patched files'))],
2944 ('f', 'force', None, _('forget any local changes to patched files'))],
2930 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2945 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2931 "^qpush":
2946 "^qpush":
2932 (push,
2947 (push,
2933 [('f', 'force', None, _('apply if the patch has rejects')),
2948 [('f', 'force', None, _('apply if the patch has rejects')),
2934 ('l', 'list', None, _('list patch name in commit text')),
2949 ('l', 'list', None, _('list patch name in commit text')),
2935 ('a', 'all', None, _('apply all patches')),
2950 ('a', 'all', None, _('apply all patches')),
2936 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
2951 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
2937 ('n', 'name', '',
2952 ('n', 'name', '',
2938 _('merge queue name (DEPRECATED)'), _('NAME')),
2953 _('merge queue name (DEPRECATED)'), _('NAME')),
2939 ('', 'move', None, _('reorder patch series and apply only the patch'))],
2954 ('', 'move', None, _('reorder patch series and apply only the patch'))],
2940 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [--move] [PATCH | INDEX]')),
2955 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [--move] [PATCH | INDEX]')),
2941 "^qrefresh":
2956 "^qrefresh":
2942 (refresh,
2957 (refresh,
2943 [('e', 'edit', None, _('edit commit message')),
2958 [('e', 'edit', None, _('edit commit message')),
2944 ('g', 'git', None, _('use git extended diff format')),
2959 ('g', 'git', None, _('use git extended diff format')),
2945 ('s', 'short', None,
2960 ('s', 'short', None,
2946 _('refresh only files already in the patch and specified files')),
2961 _('refresh only files already in the patch and specified files')),
2947 ('U', 'currentuser', None,
2962 ('U', 'currentuser', None,
2948 _('add/update author field in patch with current user')),
2963 _('add/update author field in patch with current user')),
2949 ('u', 'user', '',
2964 ('u', 'user', '',
2950 _('add/update author field in patch with given user'), _('USER')),
2965 _('add/update author field in patch with given user'), _('USER')),
2951 ('D', 'currentdate', None,
2966 ('D', 'currentdate', None,
2952 _('add/update date field in patch with current date')),
2967 _('add/update date field in patch with current date')),
2953 ('d', 'date', '',
2968 ('d', 'date', '',
2954 _('add/update date field in patch with given date'), _('DATE'))
2969 _('add/update date field in patch with given date'), _('DATE'))
2955 ] + commands.walkopts + commands.commitopts,
2970 ] + commands.walkopts + commands.commitopts,
2956 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2971 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2957 'qrename|qmv':
2972 'qrename|qmv':
2958 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2973 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2959 "qrestore":
2974 "qrestore":
2960 (restore,
2975 (restore,
2961 [('d', 'delete', None, _('delete save entry')),
2976 [('d', 'delete', None, _('delete save entry')),
2962 ('u', 'update', None, _('update queue working directory'))],
2977 ('u', 'update', None, _('update queue working directory'))],
2963 _('hg qrestore [-d] [-u] REV')),
2978 _('hg qrestore [-d] [-u] REV')),
2964 "qsave":
2979 "qsave":
2965 (save,
2980 (save,
2966 [('c', 'copy', None, _('copy patch directory')),
2981 [('c', 'copy', None, _('copy patch directory')),
2967 ('n', 'name', '',
2982 ('n', 'name', '',
2968 _('copy directory name'), _('NAME')),
2983 _('copy directory name'), _('NAME')),
2969 ('e', 'empty', None, _('clear queue status file')),
2984 ('e', 'empty', None, _('clear queue status file')),
2970 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2985 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2971 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2986 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2972 "qselect":
2987 "qselect":
2973 (select,
2988 (select,
2974 [('n', 'none', None, _('disable all guards')),
2989 [('n', 'none', None, _('disable all guards')),
2975 ('s', 'series', None, _('list all guards in series file')),
2990 ('s', 'series', None, _('list all guards in series file')),
2976 ('', 'pop', None, _('pop to before first guarded applied patch')),
2991 ('', 'pop', None, _('pop to before first guarded applied patch')),
2977 ('', 'reapply', None, _('pop, then reapply patches'))],
2992 ('', 'reapply', None, _('pop, then reapply patches'))],
2978 _('hg qselect [OPTION]... [GUARD]...')),
2993 _('hg qselect [OPTION]... [GUARD]...')),
2979 "qseries":
2994 "qseries":
2980 (series,
2995 (series,
2981 [('m', 'missing', None, _('print patches not in series')),
2996 [('m', 'missing', None, _('print patches not in series')),
2982 ] + seriesopts,
2997 ] + seriesopts,
2983 _('hg qseries [-ms]')),
2998 _('hg qseries [-ms]')),
2984 "strip":
2999 "strip":
2985 (strip,
3000 (strip,
2986 [('f', 'force', None, _('force removal of changesets even if the '
3001 [('f', 'force', None, _('force removal of changesets even if the '
2987 'working directory has uncommitted changes')),
3002 'working directory has uncommitted changes')),
2988 ('b', 'backup', None, _('bundle only changesets with local revision'
3003 ('b', 'backup', None, _('bundle only changesets with local revision'
2989 ' number greater than REV which are not'
3004 ' number greater than REV which are not'
2990 ' descendants of REV (DEPRECATED)')),
3005 ' descendants of REV (DEPRECATED)')),
2991 ('n', 'nobackup', None, _('no backups'))],
3006 ('n', 'nobackup', None, _('no backups'))],
2992 _('hg strip [-f] [-n] REV')),
3007 _('hg strip [-f] [-n] REV')),
2993 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
3008 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2994 "qunapplied":
3009 "qunapplied":
2995 (unapplied,
3010 (unapplied,
2996 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
3011 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
2997 _('hg qunapplied [-1] [-s] [PATCH]')),
3012 _('hg qunapplied [-1] [-s] [PATCH]')),
2998 "qfinish":
3013 "qfinish":
2999 (finish,
3014 (finish,
3000 [('a', 'applied', None, _('finish all applied changesets'))],
3015 [('a', 'applied', None, _('finish all applied changesets'))],
3001 _('hg qfinish [-a] [REV]...')),
3016 _('hg qfinish [-a] [REV]...')),
3002 'qqueue':
3017 'qqueue':
3003 (qqueue,
3018 (qqueue,
3004 [
3019 [
3005 ('l', 'list', False, _('list all available queues')),
3020 ('l', 'list', False, _('list all available queues')),
3006 ('c', 'create', False, _('create new queue')),
3021 ('c', 'create', False, _('create new queue')),
3007 ('', 'delete', False, _('delete reference to queue')),
3022 ('', 'delete', False, _('delete reference to queue')),
3008 ],
3023 ],
3009 _('[OPTION] [QUEUE]')),
3024 _('[OPTION] [QUEUE]')),
3010 }
3025 }
3011
3026
3012 colortable = {'qguard.negative': 'red',
3027 colortable = {'qguard.negative': 'red',
3013 'qguard.positive': 'yellow',
3028 'qguard.positive': 'yellow',
3014 'qguard.unguarded': 'green',
3029 'qguard.unguarded': 'green',
3015 'qseries.applied': 'blue bold underline',
3030 'qseries.applied': 'blue bold underline',
3016 'qseries.guarded': 'black bold',
3031 'qseries.guarded': 'black bold',
3017 'qseries.missing': 'red bold',
3032 'qseries.missing': 'red bold',
3018 'qseries.unapplied': 'black bold'}
3033 'qseries.unapplied': 'black bold'}
@@ -1,1086 +1,1088 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex
8 from node import nullid, nullrev, short, hex
9 from i18n import _
9 from i18n import _
10 import ancestor, bdiff, error, util, subrepo, patch
10 import ancestor, bdiff, error, util, subrepo, patch
11 import os, errno, stat
11 import os, errno, stat
12
12
13 propertycache = util.propertycache
13 propertycache = util.propertycache
14
14
15 class changectx(object):
15 class changectx(object):
16 """A changecontext object makes access to data related to a particular
16 """A changecontext object makes access to data related to a particular
17 changeset convenient."""
17 changeset convenient."""
18 def __init__(self, repo, changeid=''):
18 def __init__(self, repo, changeid=''):
19 """changeid is a revision number, node, or tag"""
19 """changeid is a revision number, node, or tag"""
20 if changeid == '':
20 if changeid == '':
21 changeid = '.'
21 changeid = '.'
22 self._repo = repo
22 self._repo = repo
23 if isinstance(changeid, (long, int)):
23 if isinstance(changeid, (long, int)):
24 self._rev = changeid
24 self._rev = changeid
25 self._node = self._repo.changelog.node(changeid)
25 self._node = self._repo.changelog.node(changeid)
26 else:
26 else:
27 self._node = self._repo.lookup(changeid)
27 self._node = self._repo.lookup(changeid)
28 self._rev = self._repo.changelog.rev(self._node)
28 self._rev = self._repo.changelog.rev(self._node)
29
29
30 def __str__(self):
30 def __str__(self):
31 return short(self.node())
31 return short(self.node())
32
32
33 def __int__(self):
33 def __int__(self):
34 return self.rev()
34 return self.rev()
35
35
36 def __repr__(self):
36 def __repr__(self):
37 return "<changectx %s>" % str(self)
37 return "<changectx %s>" % str(self)
38
38
39 def __hash__(self):
39 def __hash__(self):
40 try:
40 try:
41 return hash(self._rev)
41 return hash(self._rev)
42 except AttributeError:
42 except AttributeError:
43 return id(self)
43 return id(self)
44
44
45 def __eq__(self, other):
45 def __eq__(self, other):
46 try:
46 try:
47 return self._rev == other._rev
47 return self._rev == other._rev
48 except AttributeError:
48 except AttributeError:
49 return False
49 return False
50
50
51 def __ne__(self, other):
51 def __ne__(self, other):
52 return not (self == other)
52 return not (self == other)
53
53
54 def __nonzero__(self):
54 def __nonzero__(self):
55 return self._rev != nullrev
55 return self._rev != nullrev
56
56
57 @propertycache
57 @propertycache
58 def _changeset(self):
58 def _changeset(self):
59 return self._repo.changelog.read(self.node())
59 return self._repo.changelog.read(self.node())
60
60
61 @propertycache
61 @propertycache
62 def _manifest(self):
62 def _manifest(self):
63 return self._repo.manifest.read(self._changeset[0])
63 return self._repo.manifest.read(self._changeset[0])
64
64
65 @propertycache
65 @propertycache
66 def _manifestdelta(self):
66 def _manifestdelta(self):
67 return self._repo.manifest.readdelta(self._changeset[0])
67 return self._repo.manifest.readdelta(self._changeset[0])
68
68
69 @propertycache
69 @propertycache
70 def _parents(self):
70 def _parents(self):
71 p = self._repo.changelog.parentrevs(self._rev)
71 p = self._repo.changelog.parentrevs(self._rev)
72 if p[1] == nullrev:
72 if p[1] == nullrev:
73 p = p[:-1]
73 p = p[:-1]
74 return [changectx(self._repo, x) for x in p]
74 return [changectx(self._repo, x) for x in p]
75
75
76 @propertycache
76 @propertycache
77 def substate(self):
77 def substate(self):
78 return subrepo.state(self)
78 return subrepo.state(self)
79
79
80 def __contains__(self, key):
80 def __contains__(self, key):
81 return key in self._manifest
81 return key in self._manifest
82
82
83 def __getitem__(self, key):
83 def __getitem__(self, key):
84 return self.filectx(key)
84 return self.filectx(key)
85
85
86 def __iter__(self):
86 def __iter__(self):
87 for f in sorted(self._manifest):
87 for f in sorted(self._manifest):
88 yield f
88 yield f
89
89
90 def changeset(self):
90 def changeset(self):
91 return self._changeset
91 return self._changeset
92 def manifest(self):
92 def manifest(self):
93 return self._manifest
93 return self._manifest
94 def manifestnode(self):
94 def manifestnode(self):
95 return self._changeset[0]
95 return self._changeset[0]
96
96
97 def rev(self):
97 def rev(self):
98 return self._rev
98 return self._rev
99 def node(self):
99 def node(self):
100 return self._node
100 return self._node
101 def hex(self):
101 def hex(self):
102 return hex(self._node)
102 return hex(self._node)
103 def user(self):
103 def user(self):
104 return self._changeset[1]
104 return self._changeset[1]
105 def date(self):
105 def date(self):
106 return self._changeset[2]
106 return self._changeset[2]
107 def files(self):
107 def files(self):
108 return self._changeset[3]
108 return self._changeset[3]
109 def description(self):
109 def description(self):
110 return self._changeset[4]
110 return self._changeset[4]
111 def branch(self):
111 def branch(self):
112 return self._changeset[5].get("branch")
112 return self._changeset[5].get("branch")
113 def extra(self):
113 def extra(self):
114 return self._changeset[5]
114 return self._changeset[5]
115 def tags(self):
115 def tags(self):
116 return self._repo.nodetags(self._node)
116 return self._repo.nodetags(self._node)
117
117
118 def parents(self):
118 def parents(self):
119 """return contexts for each parent changeset"""
119 """return contexts for each parent changeset"""
120 return self._parents
120 return self._parents
121
121
122 def p1(self):
122 def p1(self):
123 return self._parents[0]
123 return self._parents[0]
124
124
125 def p2(self):
125 def p2(self):
126 if len(self._parents) == 2:
126 if len(self._parents) == 2:
127 return self._parents[1]
127 return self._parents[1]
128 return changectx(self._repo, -1)
128 return changectx(self._repo, -1)
129
129
130 def children(self):
130 def children(self):
131 """return contexts for each child changeset"""
131 """return contexts for each child changeset"""
132 c = self._repo.changelog.children(self._node)
132 c = self._repo.changelog.children(self._node)
133 return [changectx(self._repo, x) for x in c]
133 return [changectx(self._repo, x) for x in c]
134
134
135 def ancestors(self):
135 def ancestors(self):
136 for a in self._repo.changelog.ancestors(self._rev):
136 for a in self._repo.changelog.ancestors(self._rev):
137 yield changectx(self._repo, a)
137 yield changectx(self._repo, a)
138
138
139 def descendants(self):
139 def descendants(self):
140 for d in self._repo.changelog.descendants(self._rev):
140 for d in self._repo.changelog.descendants(self._rev):
141 yield changectx(self._repo, d)
141 yield changectx(self._repo, d)
142
142
143 def _fileinfo(self, path):
143 def _fileinfo(self, path):
144 if '_manifest' in self.__dict__:
144 if '_manifest' in self.__dict__:
145 try:
145 try:
146 return self._manifest[path], self._manifest.flags(path)
146 return self._manifest[path], self._manifest.flags(path)
147 except KeyError:
147 except KeyError:
148 raise error.LookupError(self._node, path,
148 raise error.LookupError(self._node, path,
149 _('not found in manifest'))
149 _('not found in manifest'))
150 if '_manifestdelta' in self.__dict__ or path in self.files():
150 if '_manifestdelta' in self.__dict__ or path in self.files():
151 if path in self._manifestdelta:
151 if path in self._manifestdelta:
152 return self._manifestdelta[path], self._manifestdelta.flags(path)
152 return self._manifestdelta[path], self._manifestdelta.flags(path)
153 node, flag = self._repo.manifest.find(self._changeset[0], path)
153 node, flag = self._repo.manifest.find(self._changeset[0], path)
154 if not node:
154 if not node:
155 raise error.LookupError(self._node, path,
155 raise error.LookupError(self._node, path,
156 _('not found in manifest'))
156 _('not found in manifest'))
157
157
158 return node, flag
158 return node, flag
159
159
160 def filenode(self, path):
160 def filenode(self, path):
161 return self._fileinfo(path)[0]
161 return self._fileinfo(path)[0]
162
162
163 def flags(self, path):
163 def flags(self, path):
164 try:
164 try:
165 return self._fileinfo(path)[1]
165 return self._fileinfo(path)[1]
166 except error.LookupError:
166 except error.LookupError:
167 return ''
167 return ''
168
168
169 def filectx(self, path, fileid=None, filelog=None):
169 def filectx(self, path, fileid=None, filelog=None):
170 """get a file context from this changeset"""
170 """get a file context from this changeset"""
171 if fileid is None:
171 if fileid is None:
172 fileid = self.filenode(path)
172 fileid = self.filenode(path)
173 return filectx(self._repo, path, fileid=fileid,
173 return filectx(self._repo, path, fileid=fileid,
174 changectx=self, filelog=filelog)
174 changectx=self, filelog=filelog)
175
175
176 def ancestor(self, c2):
176 def ancestor(self, c2):
177 """
177 """
178 return the ancestor context of self and c2
178 return the ancestor context of self and c2
179 """
179 """
180 # deal with workingctxs
180 # deal with workingctxs
181 n2 = c2._node
181 n2 = c2._node
182 if n2 == None:
182 if n2 == None:
183 n2 = c2._parents[0]._node
183 n2 = c2._parents[0]._node
184 n = self._repo.changelog.ancestor(self._node, n2)
184 n = self._repo.changelog.ancestor(self._node, n2)
185 return changectx(self._repo, n)
185 return changectx(self._repo, n)
186
186
187 def walk(self, match):
187 def walk(self, match):
188 fset = set(match.files())
188 fset = set(match.files())
189 # for dirstate.walk, files=['.'] means "walk the whole tree".
189 # for dirstate.walk, files=['.'] means "walk the whole tree".
190 # follow that here, too
190 # follow that here, too
191 fset.discard('.')
191 fset.discard('.')
192 for fn in self:
192 for fn in self:
193 for ffn in fset:
193 for ffn in fset:
194 # match if the file is the exact name or a directory
194 # match if the file is the exact name or a directory
195 if ffn == fn or fn.startswith("%s/" % ffn):
195 if ffn == fn or fn.startswith("%s/" % ffn):
196 fset.remove(ffn)
196 fset.remove(ffn)
197 break
197 break
198 if match(fn):
198 if match(fn):
199 yield fn
199 yield fn
200 for fn in sorted(fset):
200 for fn in sorted(fset):
201 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
201 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
202 yield fn
202 yield fn
203
203
204 def sub(self, path):
204 def sub(self, path):
205 return subrepo.subrepo(self, path)
205 return subrepo.subrepo(self, path)
206
206
207 def diff(self, ctx2=None, match=None, **opts):
207 def diff(self, ctx2=None, match=None, **opts):
208 """Returns a diff generator for the given contexts and matcher"""
208 """Returns a diff generator for the given contexts and matcher"""
209 if ctx2 is None:
209 if ctx2 is None:
210 ctx2 = self.p1()
210 ctx2 = self.p1()
211 if ctx2 is not None and not isinstance(ctx2, changectx):
211 if ctx2 is not None and not isinstance(ctx2, changectx):
212 ctx2 = self._repo[ctx2]
212 ctx2 = self._repo[ctx2]
213 diffopts = patch.diffopts(self._repo.ui, opts)
213 diffopts = patch.diffopts(self._repo.ui, opts)
214 return patch.diff(self._repo, ctx2.node(), self.node(),
214 return patch.diff(self._repo, ctx2.node(), self.node(),
215 match=match, opts=diffopts)
215 match=match, opts=diffopts)
216
216
217 class filectx(object):
217 class filectx(object):
218 """A filecontext object makes access to data related to a particular
218 """A filecontext object makes access to data related to a particular
219 filerevision convenient."""
219 filerevision convenient."""
220 def __init__(self, repo, path, changeid=None, fileid=None,
220 def __init__(self, repo, path, changeid=None, fileid=None,
221 filelog=None, changectx=None):
221 filelog=None, changectx=None):
222 """changeid can be a changeset revision, node, or tag.
222 """changeid can be a changeset revision, node, or tag.
223 fileid can be a file revision or node."""
223 fileid can be a file revision or node."""
224 self._repo = repo
224 self._repo = repo
225 self._path = path
225 self._path = path
226
226
227 assert (changeid is not None
227 assert (changeid is not None
228 or fileid is not None
228 or fileid is not None
229 or changectx is not None), \
229 or changectx is not None), \
230 ("bad args: changeid=%r, fileid=%r, changectx=%r"
230 ("bad args: changeid=%r, fileid=%r, changectx=%r"
231 % (changeid, fileid, changectx))
231 % (changeid, fileid, changectx))
232
232
233 if filelog:
233 if filelog:
234 self._filelog = filelog
234 self._filelog = filelog
235
235
236 if changeid is not None:
236 if changeid is not None:
237 self._changeid = changeid
237 self._changeid = changeid
238 if changectx is not None:
238 if changectx is not None:
239 self._changectx = changectx
239 self._changectx = changectx
240 if fileid is not None:
240 if fileid is not None:
241 self._fileid = fileid
241 self._fileid = fileid
242
242
243 @propertycache
243 @propertycache
244 def _changectx(self):
244 def _changectx(self):
245 return changectx(self._repo, self._changeid)
245 return changectx(self._repo, self._changeid)
246
246
247 @propertycache
247 @propertycache
248 def _filelog(self):
248 def _filelog(self):
249 return self._repo.file(self._path)
249 return self._repo.file(self._path)
250
250
251 @propertycache
251 @propertycache
252 def _changeid(self):
252 def _changeid(self):
253 if '_changectx' in self.__dict__:
253 if '_changectx' in self.__dict__:
254 return self._changectx.rev()
254 return self._changectx.rev()
255 else:
255 else:
256 return self._filelog.linkrev(self._filerev)
256 return self._filelog.linkrev(self._filerev)
257
257
258 @propertycache
258 @propertycache
259 def _filenode(self):
259 def _filenode(self):
260 if '_fileid' in self.__dict__:
260 if '_fileid' in self.__dict__:
261 return self._filelog.lookup(self._fileid)
261 return self._filelog.lookup(self._fileid)
262 else:
262 else:
263 return self._changectx.filenode(self._path)
263 return self._changectx.filenode(self._path)
264
264
265 @propertycache
265 @propertycache
266 def _filerev(self):
266 def _filerev(self):
267 return self._filelog.rev(self._filenode)
267 return self._filelog.rev(self._filenode)
268
268
269 @propertycache
269 @propertycache
270 def _repopath(self):
270 def _repopath(self):
271 return self._path
271 return self._path
272
272
273 def __nonzero__(self):
273 def __nonzero__(self):
274 try:
274 try:
275 self._filenode
275 self._filenode
276 return True
276 return True
277 except error.LookupError:
277 except error.LookupError:
278 # file is missing
278 # file is missing
279 return False
279 return False
280
280
281 def __str__(self):
281 def __str__(self):
282 return "%s@%s" % (self.path(), short(self.node()))
282 return "%s@%s" % (self.path(), short(self.node()))
283
283
284 def __repr__(self):
284 def __repr__(self):
285 return "<filectx %s>" % str(self)
285 return "<filectx %s>" % str(self)
286
286
287 def __hash__(self):
287 def __hash__(self):
288 try:
288 try:
289 return hash((self._path, self._filenode))
289 return hash((self._path, self._filenode))
290 except AttributeError:
290 except AttributeError:
291 return id(self)
291 return id(self)
292
292
293 def __eq__(self, other):
293 def __eq__(self, other):
294 try:
294 try:
295 return (self._path == other._path
295 return (self._path == other._path
296 and self._filenode == other._filenode)
296 and self._filenode == other._filenode)
297 except AttributeError:
297 except AttributeError:
298 return False
298 return False
299
299
300 def __ne__(self, other):
300 def __ne__(self, other):
301 return not (self == other)
301 return not (self == other)
302
302
303 def filectx(self, fileid):
303 def filectx(self, fileid):
304 '''opens an arbitrary revision of the file without
304 '''opens an arbitrary revision of the file without
305 opening a new filelog'''
305 opening a new filelog'''
306 return filectx(self._repo, self._path, fileid=fileid,
306 return filectx(self._repo, self._path, fileid=fileid,
307 filelog=self._filelog)
307 filelog=self._filelog)
308
308
309 def filerev(self):
309 def filerev(self):
310 return self._filerev
310 return self._filerev
311 def filenode(self):
311 def filenode(self):
312 return self._filenode
312 return self._filenode
313 def flags(self):
313 def flags(self):
314 return self._changectx.flags(self._path)
314 return self._changectx.flags(self._path)
315 def filelog(self):
315 def filelog(self):
316 return self._filelog
316 return self._filelog
317
317
318 def rev(self):
318 def rev(self):
319 if '_changectx' in self.__dict__:
319 if '_changectx' in self.__dict__:
320 return self._changectx.rev()
320 return self._changectx.rev()
321 if '_changeid' in self.__dict__:
321 if '_changeid' in self.__dict__:
322 return self._changectx.rev()
322 return self._changectx.rev()
323 return self._filelog.linkrev(self._filerev)
323 return self._filelog.linkrev(self._filerev)
324
324
325 def linkrev(self):
325 def linkrev(self):
326 return self._filelog.linkrev(self._filerev)
326 return self._filelog.linkrev(self._filerev)
327 def node(self):
327 def node(self):
328 return self._changectx.node()
328 return self._changectx.node()
329 def hex(self):
329 def hex(self):
330 return hex(self.node())
330 return hex(self.node())
331 def user(self):
331 def user(self):
332 return self._changectx.user()
332 return self._changectx.user()
333 def date(self):
333 def date(self):
334 return self._changectx.date()
334 return self._changectx.date()
335 def files(self):
335 def files(self):
336 return self._changectx.files()
336 return self._changectx.files()
337 def description(self):
337 def description(self):
338 return self._changectx.description()
338 return self._changectx.description()
339 def branch(self):
339 def branch(self):
340 return self._changectx.branch()
340 return self._changectx.branch()
341 def extra(self):
341 def extra(self):
342 return self._changectx.extra()
342 return self._changectx.extra()
343 def manifest(self):
343 def manifest(self):
344 return self._changectx.manifest()
344 return self._changectx.manifest()
345 def changectx(self):
345 def changectx(self):
346 return self._changectx
346 return self._changectx
347
347
348 def data(self):
348 def data(self):
349 return self._filelog.read(self._filenode)
349 return self._filelog.read(self._filenode)
350 def path(self):
350 def path(self):
351 return self._path
351 return self._path
352 def size(self):
352 def size(self):
353 return self._filelog.size(self._filerev)
353 return self._filelog.size(self._filerev)
354
354
355 def cmp(self, text):
355 def cmp(self, fctx):
356 """compare text with stored file revision
356 """compare with other file context
357
357
358 returns True if text is different than what is stored.
358 returns True if different than fctx.
359 """
359 """
360 return self._filelog.cmp(self._filenode, text)
360 return self._filelog.cmp(self._filenode, fctx.data())
361
361
362 def renamed(self):
362 def renamed(self):
363 """check if file was actually renamed in this changeset revision
363 """check if file was actually renamed in this changeset revision
364
364
365 If rename logged in file revision, we report copy for changeset only
365 If rename logged in file revision, we report copy for changeset only
366 if file revisions linkrev points back to the changeset in question
366 if file revisions linkrev points back to the changeset in question
367 or both changeset parents contain different file revisions.
367 or both changeset parents contain different file revisions.
368 """
368 """
369
369
370 renamed = self._filelog.renamed(self._filenode)
370 renamed = self._filelog.renamed(self._filenode)
371 if not renamed:
371 if not renamed:
372 return renamed
372 return renamed
373
373
374 if self.rev() == self.linkrev():
374 if self.rev() == self.linkrev():
375 return renamed
375 return renamed
376
376
377 name = self.path()
377 name = self.path()
378 fnode = self._filenode
378 fnode = self._filenode
379 for p in self._changectx.parents():
379 for p in self._changectx.parents():
380 try:
380 try:
381 if fnode == p.filenode(name):
381 if fnode == p.filenode(name):
382 return None
382 return None
383 except error.LookupError:
383 except error.LookupError:
384 pass
384 pass
385 return renamed
385 return renamed
386
386
387 def parents(self):
387 def parents(self):
388 p = self._path
388 p = self._path
389 fl = self._filelog
389 fl = self._filelog
390 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
390 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
391
391
392 r = self._filelog.renamed(self._filenode)
392 r = self._filelog.renamed(self._filenode)
393 if r:
393 if r:
394 pl[0] = (r[0], r[1], None)
394 pl[0] = (r[0], r[1], None)
395
395
396 return [filectx(self._repo, p, fileid=n, filelog=l)
396 return [filectx(self._repo, p, fileid=n, filelog=l)
397 for p, n, l in pl if n != nullid]
397 for p, n, l in pl if n != nullid]
398
398
399 def children(self):
399 def children(self):
400 # hard for renames
400 # hard for renames
401 c = self._filelog.children(self._filenode)
401 c = self._filelog.children(self._filenode)
402 return [filectx(self._repo, self._path, fileid=x,
402 return [filectx(self._repo, self._path, fileid=x,
403 filelog=self._filelog) for x in c]
403 filelog=self._filelog) for x in c]
404
404
405 def annotate(self, follow=False, linenumber=None):
405 def annotate(self, follow=False, linenumber=None):
406 '''returns a list of tuples of (ctx, line) for each line
406 '''returns a list of tuples of (ctx, line) for each line
407 in the file, where ctx is the filectx of the node where
407 in the file, where ctx is the filectx of the node where
408 that line was last changed.
408 that line was last changed.
409 This returns tuples of ((ctx, linenumber), line) for each line,
409 This returns tuples of ((ctx, linenumber), line) for each line,
410 if "linenumber" parameter is NOT "None".
410 if "linenumber" parameter is NOT "None".
411 In such tuples, linenumber means one at the first appearance
411 In such tuples, linenumber means one at the first appearance
412 in the managed file.
412 in the managed file.
413 To reduce annotation cost,
413 To reduce annotation cost,
414 this returns fixed value(False is used) as linenumber,
414 this returns fixed value(False is used) as linenumber,
415 if "linenumber" parameter is "False".'''
415 if "linenumber" parameter is "False".'''
416
416
417 def decorate_compat(text, rev):
417 def decorate_compat(text, rev):
418 return ([rev] * len(text.splitlines()), text)
418 return ([rev] * len(text.splitlines()), text)
419
419
420 def without_linenumber(text, rev):
420 def without_linenumber(text, rev):
421 return ([(rev, False)] * len(text.splitlines()), text)
421 return ([(rev, False)] * len(text.splitlines()), text)
422
422
423 def with_linenumber(text, rev):
423 def with_linenumber(text, rev):
424 size = len(text.splitlines())
424 size = len(text.splitlines())
425 return ([(rev, i) for i in xrange(1, size + 1)], text)
425 return ([(rev, i) for i in xrange(1, size + 1)], text)
426
426
427 decorate = (((linenumber is None) and decorate_compat) or
427 decorate = (((linenumber is None) and decorate_compat) or
428 (linenumber and with_linenumber) or
428 (linenumber and with_linenumber) or
429 without_linenumber)
429 without_linenumber)
430
430
431 def pair(parent, child):
431 def pair(parent, child):
432 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
432 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
433 child[0][b1:b2] = parent[0][a1:a2]
433 child[0][b1:b2] = parent[0][a1:a2]
434 return child
434 return child
435
435
436 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
436 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
437 def getctx(path, fileid):
437 def getctx(path, fileid):
438 log = path == self._path and self._filelog or getlog(path)
438 log = path == self._path and self._filelog or getlog(path)
439 return filectx(self._repo, path, fileid=fileid, filelog=log)
439 return filectx(self._repo, path, fileid=fileid, filelog=log)
440 getctx = util.lrucachefunc(getctx)
440 getctx = util.lrucachefunc(getctx)
441
441
442 def parents(f):
442 def parents(f):
443 # we want to reuse filectx objects as much as possible
443 # we want to reuse filectx objects as much as possible
444 p = f._path
444 p = f._path
445 if f._filerev is None: # working dir
445 if f._filerev is None: # working dir
446 pl = [(n.path(), n.filerev()) for n in f.parents()]
446 pl = [(n.path(), n.filerev()) for n in f.parents()]
447 else:
447 else:
448 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
448 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
449
449
450 if follow:
450 if follow:
451 r = f.renamed()
451 r = f.renamed()
452 if r:
452 if r:
453 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
453 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
454
454
455 return [getctx(p, n) for p, n in pl if n != nullrev]
455 return [getctx(p, n) for p, n in pl if n != nullrev]
456
456
457 # use linkrev to find the first changeset where self appeared
457 # use linkrev to find the first changeset where self appeared
458 if self.rev() != self.linkrev():
458 if self.rev() != self.linkrev():
459 base = self.filectx(self.filerev())
459 base = self.filectx(self.filerev())
460 else:
460 else:
461 base = self
461 base = self
462
462
463 # find all ancestors
463 # find all ancestors
464 needed = {base: 1}
464 needed = {base: 1}
465 visit = [base]
465 visit = [base]
466 files = [base._path]
466 files = [base._path]
467 while visit:
467 while visit:
468 f = visit.pop(0)
468 f = visit.pop(0)
469 for p in parents(f):
469 for p in parents(f):
470 if p not in needed:
470 if p not in needed:
471 needed[p] = 1
471 needed[p] = 1
472 visit.append(p)
472 visit.append(p)
473 if p._path not in files:
473 if p._path not in files:
474 files.append(p._path)
474 files.append(p._path)
475 else:
475 else:
476 # count how many times we'll use this
476 # count how many times we'll use this
477 needed[p] += 1
477 needed[p] += 1
478
478
479 # sort by revision (per file) which is a topological order
479 # sort by revision (per file) which is a topological order
480 visit = []
480 visit = []
481 for f in files:
481 for f in files:
482 visit.extend(n for n in needed if n._path == f)
482 visit.extend(n for n in needed if n._path == f)
483
483
484 hist = {}
484 hist = {}
485 for f in sorted(visit, key=lambda x: x.rev()):
485 for f in sorted(visit, key=lambda x: x.rev()):
486 curr = decorate(f.data(), f)
486 curr = decorate(f.data(), f)
487 for p in parents(f):
487 for p in parents(f):
488 curr = pair(hist[p], curr)
488 curr = pair(hist[p], curr)
489 # trim the history of unneeded revs
489 # trim the history of unneeded revs
490 needed[p] -= 1
490 needed[p] -= 1
491 if not needed[p]:
491 if not needed[p]:
492 del hist[p]
492 del hist[p]
493 hist[f] = curr
493 hist[f] = curr
494
494
495 return zip(hist[f][0], hist[f][1].splitlines(True))
495 return zip(hist[f][0], hist[f][1].splitlines(True))
496
496
497 def ancestor(self, fc2, actx=None):
497 def ancestor(self, fc2, actx=None):
498 """
498 """
499 find the common ancestor file context, if any, of self, and fc2
499 find the common ancestor file context, if any, of self, and fc2
500
500
501 If actx is given, it must be the changectx of the common ancestor
501 If actx is given, it must be the changectx of the common ancestor
502 of self's and fc2's respective changesets.
502 of self's and fc2's respective changesets.
503 """
503 """
504
504
505 if actx is None:
505 if actx is None:
506 actx = self.changectx().ancestor(fc2.changectx())
506 actx = self.changectx().ancestor(fc2.changectx())
507
507
508 # the trivial case: changesets are unrelated, files must be too
508 # the trivial case: changesets are unrelated, files must be too
509 if not actx:
509 if not actx:
510 return None
510 return None
511
511
512 # the easy case: no (relevant) renames
512 # the easy case: no (relevant) renames
513 if fc2.path() == self.path() and self.path() in actx:
513 if fc2.path() == self.path() and self.path() in actx:
514 return actx[self.path()]
514 return actx[self.path()]
515 acache = {}
515 acache = {}
516
516
517 # prime the ancestor cache for the working directory
517 # prime the ancestor cache for the working directory
518 for c in (self, fc2):
518 for c in (self, fc2):
519 if c._filerev is None:
519 if c._filerev is None:
520 pl = [(n.path(), n.filenode()) for n in c.parents()]
520 pl = [(n.path(), n.filenode()) for n in c.parents()]
521 acache[(c._path, None)] = pl
521 acache[(c._path, None)] = pl
522
522
523 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
523 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
524 def parents(vertex):
524 def parents(vertex):
525 if vertex in acache:
525 if vertex in acache:
526 return acache[vertex]
526 return acache[vertex]
527 f, n = vertex
527 f, n = vertex
528 if f not in flcache:
528 if f not in flcache:
529 flcache[f] = self._repo.file(f)
529 flcache[f] = self._repo.file(f)
530 fl = flcache[f]
530 fl = flcache[f]
531 pl = [(f, p) for p in fl.parents(n) if p != nullid]
531 pl = [(f, p) for p in fl.parents(n) if p != nullid]
532 re = fl.renamed(n)
532 re = fl.renamed(n)
533 if re:
533 if re:
534 pl.append(re)
534 pl.append(re)
535 acache[vertex] = pl
535 acache[vertex] = pl
536 return pl
536 return pl
537
537
538 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
538 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
539 v = ancestor.ancestor(a, b, parents)
539 v = ancestor.ancestor(a, b, parents)
540 if v:
540 if v:
541 f, n = v
541 f, n = v
542 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
542 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
543
543
544 return None
544 return None
545
545
546 def ancestors(self):
546 def ancestors(self):
547 seen = set(str(self))
547 seen = set(str(self))
548 visit = [self]
548 visit = [self]
549 while visit:
549 while visit:
550 for parent in visit.pop(0).parents():
550 for parent in visit.pop(0).parents():
551 s = str(parent)
551 s = str(parent)
552 if s not in seen:
552 if s not in seen:
553 visit.append(parent)
553 visit.append(parent)
554 seen.add(s)
554 seen.add(s)
555 yield parent
555 yield parent
556
556
557 class workingctx(changectx):
557 class workingctx(changectx):
558 """A workingctx object makes access to data related to
558 """A workingctx object makes access to data related to
559 the current working directory convenient.
559 the current working directory convenient.
560 date - any valid date string or (unixtime, offset), or None.
560 date - any valid date string or (unixtime, offset), or None.
561 user - username string, or None.
561 user - username string, or None.
562 extra - a dictionary of extra values, or None.
562 extra - a dictionary of extra values, or None.
563 changes - a list of file lists as returned by localrepo.status()
563 changes - a list of file lists as returned by localrepo.status()
564 or None to use the repository status.
564 or None to use the repository status.
565 """
565 """
566 def __init__(self, repo, text="", user=None, date=None, extra=None,
566 def __init__(self, repo, text="", user=None, date=None, extra=None,
567 changes=None):
567 changes=None):
568 self._repo = repo
568 self._repo = repo
569 self._rev = None
569 self._rev = None
570 self._node = None
570 self._node = None
571 self._text = text
571 self._text = text
572 if date:
572 if date:
573 self._date = util.parsedate(date)
573 self._date = util.parsedate(date)
574 if user:
574 if user:
575 self._user = user
575 self._user = user
576 if changes:
576 if changes:
577 self._status = list(changes[:4])
577 self._status = list(changes[:4])
578 self._unknown = changes[4]
578 self._unknown = changes[4]
579 self._ignored = changes[5]
579 self._ignored = changes[5]
580 self._clean = changes[6]
580 self._clean = changes[6]
581 else:
581 else:
582 self._unknown = None
582 self._unknown = None
583 self._ignored = None
583 self._ignored = None
584 self._clean = None
584 self._clean = None
585
585
586 self._extra = {}
586 self._extra = {}
587 if extra:
587 if extra:
588 self._extra = extra.copy()
588 self._extra = extra.copy()
589 if 'branch' not in self._extra:
589 if 'branch' not in self._extra:
590 branch = self._repo.dirstate.branch()
590 branch = self._repo.dirstate.branch()
591 try:
591 try:
592 branch = branch.decode('UTF-8').encode('UTF-8')
592 branch = branch.decode('UTF-8').encode('UTF-8')
593 except UnicodeDecodeError:
593 except UnicodeDecodeError:
594 raise util.Abort(_('branch name not in UTF-8!'))
594 raise util.Abort(_('branch name not in UTF-8!'))
595 self._extra['branch'] = branch
595 self._extra['branch'] = branch
596 if self._extra['branch'] == '':
596 if self._extra['branch'] == '':
597 self._extra['branch'] = 'default'
597 self._extra['branch'] = 'default'
598
598
599 def __str__(self):
599 def __str__(self):
600 return str(self._parents[0]) + "+"
600 return str(self._parents[0]) + "+"
601
601
602 def __nonzero__(self):
602 def __nonzero__(self):
603 return True
603 return True
604
604
605 def __contains__(self, key):
605 def __contains__(self, key):
606 return self._repo.dirstate[key] not in "?r"
606 return self._repo.dirstate[key] not in "?r"
607
607
608 @propertycache
608 @propertycache
609 def _manifest(self):
609 def _manifest(self):
610 """generate a manifest corresponding to the working directory"""
610 """generate a manifest corresponding to the working directory"""
611
611
612 if self._unknown is None:
612 if self._unknown is None:
613 self.status(unknown=True)
613 self.status(unknown=True)
614
614
615 man = self._parents[0].manifest().copy()
615 man = self._parents[0].manifest().copy()
616 copied = self._repo.dirstate.copies()
616 copied = self._repo.dirstate.copies()
617 if len(self._parents) > 1:
617 if len(self._parents) > 1:
618 man2 = self.p2().manifest()
618 man2 = self.p2().manifest()
619 def getman(f):
619 def getman(f):
620 if f in man:
620 if f in man:
621 return man
621 return man
622 return man2
622 return man2
623 else:
623 else:
624 getman = lambda f: man
624 getman = lambda f: man
625 def cf(f):
625 def cf(f):
626 f = copied.get(f, f)
626 f = copied.get(f, f)
627 return getman(f).flags(f)
627 return getman(f).flags(f)
628 ff = self._repo.dirstate.flagfunc(cf)
628 ff = self._repo.dirstate.flagfunc(cf)
629 modified, added, removed, deleted = self._status
629 modified, added, removed, deleted = self._status
630 unknown = self._unknown
630 unknown = self._unknown
631 for i, l in (("a", added), ("m", modified), ("u", unknown)):
631 for i, l in (("a", added), ("m", modified), ("u", unknown)):
632 for f in l:
632 for f in l:
633 orig = copied.get(f, f)
633 orig = copied.get(f, f)
634 man[f] = getman(orig).get(orig, nullid) + i
634 man[f] = getman(orig).get(orig, nullid) + i
635 try:
635 try:
636 man.set(f, ff(f))
636 man.set(f, ff(f))
637 except OSError:
637 except OSError:
638 pass
638 pass
639
639
640 for f in deleted + removed:
640 for f in deleted + removed:
641 if f in man:
641 if f in man:
642 del man[f]
642 del man[f]
643
643
644 return man
644 return man
645
645
646 @propertycache
646 @propertycache
647 def _status(self):
647 def _status(self):
648 return self._repo.status()[:4]
648 return self._repo.status()[:4]
649
649
650 @propertycache
650 @propertycache
651 def _user(self):
651 def _user(self):
652 return self._repo.ui.username()
652 return self._repo.ui.username()
653
653
654 @propertycache
654 @propertycache
655 def _date(self):
655 def _date(self):
656 return util.makedate()
656 return util.makedate()
657
657
658 @propertycache
658 @propertycache
659 def _parents(self):
659 def _parents(self):
660 p = self._repo.dirstate.parents()
660 p = self._repo.dirstate.parents()
661 if p[1] == nullid:
661 if p[1] == nullid:
662 p = p[:-1]
662 p = p[:-1]
663 self._parents = [changectx(self._repo, x) for x in p]
663 self._parents = [changectx(self._repo, x) for x in p]
664 return self._parents
664 return self._parents
665
665
666 def status(self, ignored=False, clean=False, unknown=False):
666 def status(self, ignored=False, clean=False, unknown=False):
667 """Explicit status query
667 """Explicit status query
668 Unless this method is used to query the working copy status, the
668 Unless this method is used to query the working copy status, the
669 _status property will implicitly read the status using its default
669 _status property will implicitly read the status using its default
670 arguments."""
670 arguments."""
671 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
671 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
672 self._unknown = self._ignored = self._clean = None
672 self._unknown = self._ignored = self._clean = None
673 if unknown:
673 if unknown:
674 self._unknown = stat[4]
674 self._unknown = stat[4]
675 if ignored:
675 if ignored:
676 self._ignored = stat[5]
676 self._ignored = stat[5]
677 if clean:
677 if clean:
678 self._clean = stat[6]
678 self._clean = stat[6]
679 self._status = stat[:4]
679 self._status = stat[:4]
680 return stat
680 return stat
681
681
682 def manifest(self):
682 def manifest(self):
683 return self._manifest
683 return self._manifest
684 def user(self):
684 def user(self):
685 return self._user or self._repo.ui.username()
685 return self._user or self._repo.ui.username()
686 def date(self):
686 def date(self):
687 return self._date
687 return self._date
688 def description(self):
688 def description(self):
689 return self._text
689 return self._text
690 def files(self):
690 def files(self):
691 return sorted(self._status[0] + self._status[1] + self._status[2])
691 return sorted(self._status[0] + self._status[1] + self._status[2])
692
692
693 def modified(self):
693 def modified(self):
694 return self._status[0]
694 return self._status[0]
695 def added(self):
695 def added(self):
696 return self._status[1]
696 return self._status[1]
697 def removed(self):
697 def removed(self):
698 return self._status[2]
698 return self._status[2]
699 def deleted(self):
699 def deleted(self):
700 return self._status[3]
700 return self._status[3]
701 def unknown(self):
701 def unknown(self):
702 assert self._unknown is not None # must call status first
702 assert self._unknown is not None # must call status first
703 return self._unknown
703 return self._unknown
704 def ignored(self):
704 def ignored(self):
705 assert self._ignored is not None # must call status first
705 assert self._ignored is not None # must call status first
706 return self._ignored
706 return self._ignored
707 def clean(self):
707 def clean(self):
708 assert self._clean is not None # must call status first
708 assert self._clean is not None # must call status first
709 return self._clean
709 return self._clean
710 def branch(self):
710 def branch(self):
711 return self._extra['branch']
711 return self._extra['branch']
712 def extra(self):
712 def extra(self):
713 return self._extra
713 return self._extra
714
714
715 def tags(self):
715 def tags(self):
716 t = []
716 t = []
717 [t.extend(p.tags()) for p in self.parents()]
717 [t.extend(p.tags()) for p in self.parents()]
718 return t
718 return t
719
719
720 def children(self):
720 def children(self):
721 return []
721 return []
722
722
723 def flags(self, path):
723 def flags(self, path):
724 if '_manifest' in self.__dict__:
724 if '_manifest' in self.__dict__:
725 try:
725 try:
726 return self._manifest.flags(path)
726 return self._manifest.flags(path)
727 except KeyError:
727 except KeyError:
728 return ''
728 return ''
729
729
730 orig = self._repo.dirstate.copies().get(path, path)
730 orig = self._repo.dirstate.copies().get(path, path)
731
731
732 def findflag(ctx):
732 def findflag(ctx):
733 mnode = ctx.changeset()[0]
733 mnode = ctx.changeset()[0]
734 node, flag = self._repo.manifest.find(mnode, orig)
734 node, flag = self._repo.manifest.find(mnode, orig)
735 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
735 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
736 try:
736 try:
737 return ff(path)
737 return ff(path)
738 except OSError:
738 except OSError:
739 pass
739 pass
740
740
741 flag = findflag(self._parents[0])
741 flag = findflag(self._parents[0])
742 if flag is None and len(self.parents()) > 1:
742 if flag is None and len(self.parents()) > 1:
743 flag = findflag(self._parents[1])
743 flag = findflag(self._parents[1])
744 if flag is None or self._repo.dirstate[path] == 'r':
744 if flag is None or self._repo.dirstate[path] == 'r':
745 return ''
745 return ''
746 return flag
746 return flag
747
747
748 def filectx(self, path, filelog=None):
748 def filectx(self, path, filelog=None):
749 """get a file context from the working directory"""
749 """get a file context from the working directory"""
750 return workingfilectx(self._repo, path, workingctx=self,
750 return workingfilectx(self._repo, path, workingctx=self,
751 filelog=filelog)
751 filelog=filelog)
752
752
753 def ancestor(self, c2):
753 def ancestor(self, c2):
754 """return the ancestor context of self and c2"""
754 """return the ancestor context of self and c2"""
755 return self._parents[0].ancestor(c2) # punt on two parents for now
755 return self._parents[0].ancestor(c2) # punt on two parents for now
756
756
757 def walk(self, match):
757 def walk(self, match):
758 return sorted(self._repo.dirstate.walk(match, self.substate.keys(),
758 return sorted(self._repo.dirstate.walk(match, self.substate.keys(),
759 True, False))
759 True, False))
760
760
761 def dirty(self, missing=False):
761 def dirty(self, missing=False):
762 "check whether a working directory is modified"
762 "check whether a working directory is modified"
763 # check subrepos first
763 # check subrepos first
764 for s in self.substate:
764 for s in self.substate:
765 if self.sub(s).dirty():
765 if self.sub(s).dirty():
766 return True
766 return True
767 # check current working dir
767 # check current working dir
768 return (self.p2() or self.branch() != self.p1().branch() or
768 return (self.p2() or self.branch() != self.p1().branch() or
769 self.modified() or self.added() or self.removed() or
769 self.modified() or self.added() or self.removed() or
770 (missing and self.deleted()))
770 (missing and self.deleted()))
771
771
772 def add(self, list):
772 def add(self, list):
773 wlock = self._repo.wlock()
773 wlock = self._repo.wlock()
774 ui, ds = self._repo.ui, self._repo.dirstate
774 ui, ds = self._repo.ui, self._repo.dirstate
775 try:
775 try:
776 rejected = []
776 rejected = []
777 for f in list:
777 for f in list:
778 p = self._repo.wjoin(f)
778 p = self._repo.wjoin(f)
779 try:
779 try:
780 st = os.lstat(p)
780 st = os.lstat(p)
781 except:
781 except:
782 ui.warn(_("%s does not exist!\n") % f)
782 ui.warn(_("%s does not exist!\n") % f)
783 rejected.append(f)
783 rejected.append(f)
784 continue
784 continue
785 if st.st_size > 10000000:
785 if st.st_size > 10000000:
786 ui.warn(_("%s: up to %d MB of RAM may be required "
786 ui.warn(_("%s: up to %d MB of RAM may be required "
787 "to manage this file\n"
787 "to manage this file\n"
788 "(use 'hg revert %s' to cancel the "
788 "(use 'hg revert %s' to cancel the "
789 "pending addition)\n")
789 "pending addition)\n")
790 % (f, 3 * st.st_size // 1000000, f))
790 % (f, 3 * st.st_size // 1000000, f))
791 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
791 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
792 ui.warn(_("%s not added: only files and symlinks "
792 ui.warn(_("%s not added: only files and symlinks "
793 "supported currently\n") % f)
793 "supported currently\n") % f)
794 rejected.append(p)
794 rejected.append(p)
795 elif ds[f] in 'amn':
795 elif ds[f] in 'amn':
796 ui.warn(_("%s already tracked!\n") % f)
796 ui.warn(_("%s already tracked!\n") % f)
797 elif ds[f] == 'r':
797 elif ds[f] == 'r':
798 ds.normallookup(f)
798 ds.normallookup(f)
799 else:
799 else:
800 ds.add(f)
800 ds.add(f)
801 return rejected
801 return rejected
802 finally:
802 finally:
803 wlock.release()
803 wlock.release()
804
804
805 def forget(self, list):
805 def forget(self, list):
806 wlock = self._repo.wlock()
806 wlock = self._repo.wlock()
807 try:
807 try:
808 for f in list:
808 for f in list:
809 if self._repo.dirstate[f] != 'a':
809 if self._repo.dirstate[f] != 'a':
810 self._repo.ui.warn(_("%s not added!\n") % f)
810 self._repo.ui.warn(_("%s not added!\n") % f)
811 else:
811 else:
812 self._repo.dirstate.forget(f)
812 self._repo.dirstate.forget(f)
813 finally:
813 finally:
814 wlock.release()
814 wlock.release()
815
815
816 def remove(self, list, unlink=False):
816 def remove(self, list, unlink=False):
817 if unlink:
817 if unlink:
818 for f in list:
818 for f in list:
819 try:
819 try:
820 util.unlink(self._repo.wjoin(f))
820 util.unlink(self._repo.wjoin(f))
821 except OSError, inst:
821 except OSError, inst:
822 if inst.errno != errno.ENOENT:
822 if inst.errno != errno.ENOENT:
823 raise
823 raise
824 wlock = self._repo.wlock()
824 wlock = self._repo.wlock()
825 try:
825 try:
826 for f in list:
826 for f in list:
827 if unlink and os.path.exists(self._repo.wjoin(f)):
827 if unlink and os.path.exists(self._repo.wjoin(f)):
828 self._repo.ui.warn(_("%s still exists!\n") % f)
828 self._repo.ui.warn(_("%s still exists!\n") % f)
829 elif self._repo.dirstate[f] == 'a':
829 elif self._repo.dirstate[f] == 'a':
830 self._repo.dirstate.forget(f)
830 self._repo.dirstate.forget(f)
831 elif f not in self._repo.dirstate:
831 elif f not in self._repo.dirstate:
832 self._repo.ui.warn(_("%s not tracked!\n") % f)
832 self._repo.ui.warn(_("%s not tracked!\n") % f)
833 else:
833 else:
834 self._repo.dirstate.remove(f)
834 self._repo.dirstate.remove(f)
835 finally:
835 finally:
836 wlock.release()
836 wlock.release()
837
837
838 def undelete(self, list):
838 def undelete(self, list):
839 pctxs = self.parents()
839 pctxs = self.parents()
840 wlock = self._repo.wlock()
840 wlock = self._repo.wlock()
841 try:
841 try:
842 for f in list:
842 for f in list:
843 if self._repo.dirstate[f] != 'r':
843 if self._repo.dirstate[f] != 'r':
844 self._repo.ui.warn(_("%s not removed!\n") % f)
844 self._repo.ui.warn(_("%s not removed!\n") % f)
845 else:
845 else:
846 fctx = f in pctxs[0] and pctxs[0] or pctxs[1]
846 fctx = f in pctxs[0] and pctxs[0] or pctxs[1]
847 t = fctx.data()
847 t = fctx.data()
848 self._repo.wwrite(f, t, fctx.flags())
848 self._repo.wwrite(f, t, fctx.flags())
849 self._repo.dirstate.normal(f)
849 self._repo.dirstate.normal(f)
850 finally:
850 finally:
851 wlock.release()
851 wlock.release()
852
852
853 def copy(self, source, dest):
853 def copy(self, source, dest):
854 p = self._repo.wjoin(dest)
854 p = self._repo.wjoin(dest)
855 if not (os.path.exists(p) or os.path.islink(p)):
855 if not (os.path.exists(p) or os.path.islink(p)):
856 self._repo.ui.warn(_("%s does not exist!\n") % dest)
856 self._repo.ui.warn(_("%s does not exist!\n") % dest)
857 elif not (os.path.isfile(p) or os.path.islink(p)):
857 elif not (os.path.isfile(p) or os.path.islink(p)):
858 self._repo.ui.warn(_("copy failed: %s is not a file or a "
858 self._repo.ui.warn(_("copy failed: %s is not a file or a "
859 "symbolic link\n") % dest)
859 "symbolic link\n") % dest)
860 else:
860 else:
861 wlock = self._repo.wlock()
861 wlock = self._repo.wlock()
862 try:
862 try:
863 if self._repo.dirstate[dest] in '?r':
863 if self._repo.dirstate[dest] in '?r':
864 self._repo.dirstate.add(dest)
864 self._repo.dirstate.add(dest)
865 self._repo.dirstate.copy(source, dest)
865 self._repo.dirstate.copy(source, dest)
866 finally:
866 finally:
867 wlock.release()
867 wlock.release()
868
868
869 class workingfilectx(filectx):
869 class workingfilectx(filectx):
870 """A workingfilectx object makes access to data related to a particular
870 """A workingfilectx object makes access to data related to a particular
871 file in the working directory convenient."""
871 file in the working directory convenient."""
872 def __init__(self, repo, path, filelog=None, workingctx=None):
872 def __init__(self, repo, path, filelog=None, workingctx=None):
873 """changeid can be a changeset revision, node, or tag.
873 """changeid can be a changeset revision, node, or tag.
874 fileid can be a file revision or node."""
874 fileid can be a file revision or node."""
875 self._repo = repo
875 self._repo = repo
876 self._path = path
876 self._path = path
877 self._changeid = None
877 self._changeid = None
878 self._filerev = self._filenode = None
878 self._filerev = self._filenode = None
879
879
880 if filelog:
880 if filelog:
881 self._filelog = filelog
881 self._filelog = filelog
882 if workingctx:
882 if workingctx:
883 self._changectx = workingctx
883 self._changectx = workingctx
884
884
885 @propertycache
885 @propertycache
886 def _changectx(self):
886 def _changectx(self):
887 return workingctx(self._repo)
887 return workingctx(self._repo)
888
888
889 def __nonzero__(self):
889 def __nonzero__(self):
890 return True
890 return True
891
891
892 def __str__(self):
892 def __str__(self):
893 return "%s@%s" % (self.path(), self._changectx)
893 return "%s@%s" % (self.path(), self._changectx)
894
894
895 def data(self):
895 def data(self):
896 return self._repo.wread(self._path)
896 return self._repo.wread(self._path)
897 def renamed(self):
897 def renamed(self):
898 rp = self._repo.dirstate.copied(self._path)
898 rp = self._repo.dirstate.copied(self._path)
899 if not rp:
899 if not rp:
900 return None
900 return None
901 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
901 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
902
902
903 def parents(self):
903 def parents(self):
904 '''return parent filectxs, following copies if necessary'''
904 '''return parent filectxs, following copies if necessary'''
905 def filenode(ctx, path):
905 def filenode(ctx, path):
906 return ctx._manifest.get(path, nullid)
906 return ctx._manifest.get(path, nullid)
907
907
908 path = self._path
908 path = self._path
909 fl = self._filelog
909 fl = self._filelog
910 pcl = self._changectx._parents
910 pcl = self._changectx._parents
911 renamed = self.renamed()
911 renamed = self.renamed()
912
912
913 if renamed:
913 if renamed:
914 pl = [renamed + (None,)]
914 pl = [renamed + (None,)]
915 else:
915 else:
916 pl = [(path, filenode(pcl[0], path), fl)]
916 pl = [(path, filenode(pcl[0], path), fl)]
917
917
918 for pc in pcl[1:]:
918 for pc in pcl[1:]:
919 pl.append((path, filenode(pc, path), fl))
919 pl.append((path, filenode(pc, path), fl))
920
920
921 return [filectx(self._repo, p, fileid=n, filelog=l)
921 return [filectx(self._repo, p, fileid=n, filelog=l)
922 for p, n, l in pl if n != nullid]
922 for p, n, l in pl if n != nullid]
923
923
924 def children(self):
924 def children(self):
925 return []
925 return []
926
926
927 def size(self):
927 def size(self):
928 return os.lstat(self._repo.wjoin(self._path)).st_size
928 return os.lstat(self._repo.wjoin(self._path)).st_size
929 def date(self):
929 def date(self):
930 t, tz = self._changectx.date()
930 t, tz = self._changectx.date()
931 try:
931 try:
932 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
932 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
933 except OSError, err:
933 except OSError, err:
934 if err.errno != errno.ENOENT:
934 if err.errno != errno.ENOENT:
935 raise
935 raise
936 return (t, tz)
936 return (t, tz)
937
937
938 def cmp(self, text):
938 def cmp(self, fctx):
939 """compare text with disk content
939 """compare with other file context
940
940
941 returns True if text is different than what is on disk.
941 returns True if different than fctx.
942 """
942 """
943 return self._repo.wread(self._path) != text
943 # fctx should be a filectx (not a wfctx)
944 # invert comparison to reuse the same code path
945 return fctx.cmp(self)
944
946
945 class memctx(object):
947 class memctx(object):
946 """Use memctx to perform in-memory commits via localrepo.commitctx().
948 """Use memctx to perform in-memory commits via localrepo.commitctx().
947
949
948 Revision information is supplied at initialization time while
950 Revision information is supplied at initialization time while
949 related files data and is made available through a callback
951 related files data and is made available through a callback
950 mechanism. 'repo' is the current localrepo, 'parents' is a
952 mechanism. 'repo' is the current localrepo, 'parents' is a
951 sequence of two parent revisions identifiers (pass None for every
953 sequence of two parent revisions identifiers (pass None for every
952 missing parent), 'text' is the commit message and 'files' lists
954 missing parent), 'text' is the commit message and 'files' lists
953 names of files touched by the revision (normalized and relative to
955 names of files touched by the revision (normalized and relative to
954 repository root).
956 repository root).
955
957
956 filectxfn(repo, memctx, path) is a callable receiving the
958 filectxfn(repo, memctx, path) is a callable receiving the
957 repository, the current memctx object and the normalized path of
959 repository, the current memctx object and the normalized path of
958 requested file, relative to repository root. It is fired by the
960 requested file, relative to repository root. It is fired by the
959 commit function for every file in 'files', but calls order is
961 commit function for every file in 'files', but calls order is
960 undefined. If the file is available in the revision being
962 undefined. If the file is available in the revision being
961 committed (updated or added), filectxfn returns a memfilectx
963 committed (updated or added), filectxfn returns a memfilectx
962 object. If the file was removed, filectxfn raises an
964 object. If the file was removed, filectxfn raises an
963 IOError. Moved files are represented by marking the source file
965 IOError. Moved files are represented by marking the source file
964 removed and the new file added with copy information (see
966 removed and the new file added with copy information (see
965 memfilectx).
967 memfilectx).
966
968
967 user receives the committer name and defaults to current
969 user receives the committer name and defaults to current
968 repository username, date is the commit date in any format
970 repository username, date is the commit date in any format
969 supported by util.parsedate() and defaults to current date, extra
971 supported by util.parsedate() and defaults to current date, extra
970 is a dictionary of metadata or is left empty.
972 is a dictionary of metadata or is left empty.
971 """
973 """
972 def __init__(self, repo, parents, text, files, filectxfn, user=None,
974 def __init__(self, repo, parents, text, files, filectxfn, user=None,
973 date=None, extra=None):
975 date=None, extra=None):
974 self._repo = repo
976 self._repo = repo
975 self._rev = None
977 self._rev = None
976 self._node = None
978 self._node = None
977 self._text = text
979 self._text = text
978 self._date = date and util.parsedate(date) or util.makedate()
980 self._date = date and util.parsedate(date) or util.makedate()
979 self._user = user
981 self._user = user
980 parents = [(p or nullid) for p in parents]
982 parents = [(p or nullid) for p in parents]
981 p1, p2 = parents
983 p1, p2 = parents
982 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
984 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
983 files = sorted(set(files))
985 files = sorted(set(files))
984 self._status = [files, [], [], [], []]
986 self._status = [files, [], [], [], []]
985 self._filectxfn = filectxfn
987 self._filectxfn = filectxfn
986
988
987 self._extra = extra and extra.copy() or {}
989 self._extra = extra and extra.copy() or {}
988 if 'branch' not in self._extra:
990 if 'branch' not in self._extra:
989 self._extra['branch'] = 'default'
991 self._extra['branch'] = 'default'
990 elif self._extra.get('branch') == '':
992 elif self._extra.get('branch') == '':
991 self._extra['branch'] = 'default'
993 self._extra['branch'] = 'default'
992
994
993 def __str__(self):
995 def __str__(self):
994 return str(self._parents[0]) + "+"
996 return str(self._parents[0]) + "+"
995
997
996 def __int__(self):
998 def __int__(self):
997 return self._rev
999 return self._rev
998
1000
999 def __nonzero__(self):
1001 def __nonzero__(self):
1000 return True
1002 return True
1001
1003
1002 def __getitem__(self, key):
1004 def __getitem__(self, key):
1003 return self.filectx(key)
1005 return self.filectx(key)
1004
1006
1005 def p1(self):
1007 def p1(self):
1006 return self._parents[0]
1008 return self._parents[0]
1007 def p2(self):
1009 def p2(self):
1008 return self._parents[1]
1010 return self._parents[1]
1009
1011
1010 def user(self):
1012 def user(self):
1011 return self._user or self._repo.ui.username()
1013 return self._user or self._repo.ui.username()
1012 def date(self):
1014 def date(self):
1013 return self._date
1015 return self._date
1014 def description(self):
1016 def description(self):
1015 return self._text
1017 return self._text
1016 def files(self):
1018 def files(self):
1017 return self.modified()
1019 return self.modified()
1018 def modified(self):
1020 def modified(self):
1019 return self._status[0]
1021 return self._status[0]
1020 def added(self):
1022 def added(self):
1021 return self._status[1]
1023 return self._status[1]
1022 def removed(self):
1024 def removed(self):
1023 return self._status[2]
1025 return self._status[2]
1024 def deleted(self):
1026 def deleted(self):
1025 return self._status[3]
1027 return self._status[3]
1026 def unknown(self):
1028 def unknown(self):
1027 return self._status[4]
1029 return self._status[4]
1028 def ignored(self):
1030 def ignored(self):
1029 return self._status[5]
1031 return self._status[5]
1030 def clean(self):
1032 def clean(self):
1031 return self._status[6]
1033 return self._status[6]
1032 def branch(self):
1034 def branch(self):
1033 return self._extra['branch']
1035 return self._extra['branch']
1034 def extra(self):
1036 def extra(self):
1035 return self._extra
1037 return self._extra
1036 def flags(self, f):
1038 def flags(self, f):
1037 return self[f].flags()
1039 return self[f].flags()
1038
1040
1039 def parents(self):
1041 def parents(self):
1040 """return contexts for each parent changeset"""
1042 """return contexts for each parent changeset"""
1041 return self._parents
1043 return self._parents
1042
1044
1043 def filectx(self, path, filelog=None):
1045 def filectx(self, path, filelog=None):
1044 """get a file context from the working directory"""
1046 """get a file context from the working directory"""
1045 return self._filectxfn(self._repo, self, path)
1047 return self._filectxfn(self._repo, self, path)
1046
1048
1047 def commit(self):
1049 def commit(self):
1048 """commit context to the repo"""
1050 """commit context to the repo"""
1049 return self._repo.commitctx(self)
1051 return self._repo.commitctx(self)
1050
1052
1051 class memfilectx(object):
1053 class memfilectx(object):
1052 """memfilectx represents an in-memory file to commit.
1054 """memfilectx represents an in-memory file to commit.
1053
1055
1054 See memctx for more details.
1056 See memctx for more details.
1055 """
1057 """
1056 def __init__(self, path, data, islink=False, isexec=False, copied=None):
1058 def __init__(self, path, data, islink=False, isexec=False, copied=None):
1057 """
1059 """
1058 path is the normalized file path relative to repository root.
1060 path is the normalized file path relative to repository root.
1059 data is the file content as a string.
1061 data is the file content as a string.
1060 islink is True if the file is a symbolic link.
1062 islink is True if the file is a symbolic link.
1061 isexec is True if the file is executable.
1063 isexec is True if the file is executable.
1062 copied is the source file path if current file was copied in the
1064 copied is the source file path if current file was copied in the
1063 revision being committed, or None."""
1065 revision being committed, or None."""
1064 self._path = path
1066 self._path = path
1065 self._data = data
1067 self._data = data
1066 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1068 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1067 self._copied = None
1069 self._copied = None
1068 if copied:
1070 if copied:
1069 self._copied = (copied, nullid)
1071 self._copied = (copied, nullid)
1070
1072
1071 def __nonzero__(self):
1073 def __nonzero__(self):
1072 return True
1074 return True
1073 def __str__(self):
1075 def __str__(self):
1074 return "%s@%s" % (self.path(), self._changectx)
1076 return "%s@%s" % (self.path(), self._changectx)
1075 def path(self):
1077 def path(self):
1076 return self._path
1078 return self._path
1077 def data(self):
1079 def data(self):
1078 return self._data
1080 return self._data
1079 def flags(self):
1081 def flags(self):
1080 return self._flags
1082 return self._flags
1081 def isexec(self):
1083 def isexec(self):
1082 return 'x' in self._flags
1084 return 'x' in self._flags
1083 def islink(self):
1085 def islink(self):
1084 return 'l' in self._flags
1086 return 'l' in self._flags
1085 def renamed(self):
1087 def renamed(self):
1086 return self._copied
1088 return self._copied
@@ -1,259 +1,259 b''
1 # filemerge.py - file-level merge handling for Mercurial
1 # filemerge.py - file-level merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import short
8 from node import short
9 from i18n import _
9 from i18n import _
10 import util, simplemerge, match, error
10 import util, simplemerge, match, error
11 import os, tempfile, re, filecmp
11 import os, tempfile, re, filecmp
12
12
13 def _toolstr(ui, tool, part, default=""):
13 def _toolstr(ui, tool, part, default=""):
14 return ui.config("merge-tools", tool + "." + part, default)
14 return ui.config("merge-tools", tool + "." + part, default)
15
15
16 def _toolbool(ui, tool, part, default=False):
16 def _toolbool(ui, tool, part, default=False):
17 return ui.configbool("merge-tools", tool + "." + part, default)
17 return ui.configbool("merge-tools", tool + "." + part, default)
18
18
19 def _toollist(ui, tool, part, default=[]):
19 def _toollist(ui, tool, part, default=[]):
20 return ui.configlist("merge-tools", tool + "." + part, default)
20 return ui.configlist("merge-tools", tool + "." + part, default)
21
21
22 _internal = ['internal:' + s
22 _internal = ['internal:' + s
23 for s in 'fail local other merge prompt dump'.split()]
23 for s in 'fail local other merge prompt dump'.split()]
24
24
25 def _findtool(ui, tool):
25 def _findtool(ui, tool):
26 if tool in _internal:
26 if tool in _internal:
27 return tool
27 return tool
28 k = _toolstr(ui, tool, "regkey")
28 k = _toolstr(ui, tool, "regkey")
29 if k:
29 if k:
30 p = util.lookup_reg(k, _toolstr(ui, tool, "regname"))
30 p = util.lookup_reg(k, _toolstr(ui, tool, "regname"))
31 if p:
31 if p:
32 p = util.find_exe(p + _toolstr(ui, tool, "regappend"))
32 p = util.find_exe(p + _toolstr(ui, tool, "regappend"))
33 if p:
33 if p:
34 return p
34 return p
35 return util.find_exe(_toolstr(ui, tool, "executable", tool))
35 return util.find_exe(_toolstr(ui, tool, "executable", tool))
36
36
37 def _picktool(repo, ui, path, binary, symlink):
37 def _picktool(repo, ui, path, binary, symlink):
38 def check(tool, pat, symlink, binary):
38 def check(tool, pat, symlink, binary):
39 tmsg = tool
39 tmsg = tool
40 if pat:
40 if pat:
41 tmsg += " specified for " + pat
41 tmsg += " specified for " + pat
42 if not _findtool(ui, tool):
42 if not _findtool(ui, tool):
43 if pat: # explicitly requested tool deserves a warning
43 if pat: # explicitly requested tool deserves a warning
44 ui.warn(_("couldn't find merge tool %s\n") % tmsg)
44 ui.warn(_("couldn't find merge tool %s\n") % tmsg)
45 else: # configured but non-existing tools are more silent
45 else: # configured but non-existing tools are more silent
46 ui.note(_("couldn't find merge tool %s\n") % tmsg)
46 ui.note(_("couldn't find merge tool %s\n") % tmsg)
47 elif symlink and not _toolbool(ui, tool, "symlink"):
47 elif symlink and not _toolbool(ui, tool, "symlink"):
48 ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
48 ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
49 elif binary and not _toolbool(ui, tool, "binary"):
49 elif binary and not _toolbool(ui, tool, "binary"):
50 ui.warn(_("tool %s can't handle binary\n") % tmsg)
50 ui.warn(_("tool %s can't handle binary\n") % tmsg)
51 elif not util.gui() and _toolbool(ui, tool, "gui"):
51 elif not util.gui() and _toolbool(ui, tool, "gui"):
52 ui.warn(_("tool %s requires a GUI\n") % tmsg)
52 ui.warn(_("tool %s requires a GUI\n") % tmsg)
53 else:
53 else:
54 return True
54 return True
55 return False
55 return False
56
56
57 # HGMERGE takes precedence
57 # HGMERGE takes precedence
58 hgmerge = os.environ.get("HGMERGE")
58 hgmerge = os.environ.get("HGMERGE")
59 if hgmerge:
59 if hgmerge:
60 return (hgmerge, hgmerge)
60 return (hgmerge, hgmerge)
61
61
62 # then patterns
62 # then patterns
63 for pat, tool in ui.configitems("merge-patterns"):
63 for pat, tool in ui.configitems("merge-patterns"):
64 mf = match.match(repo.root, '', [pat])
64 mf = match.match(repo.root, '', [pat])
65 if mf(path) and check(tool, pat, symlink, False):
65 if mf(path) and check(tool, pat, symlink, False):
66 toolpath = _findtool(ui, tool)
66 toolpath = _findtool(ui, tool)
67 return (tool, '"' + toolpath + '"')
67 return (tool, '"' + toolpath + '"')
68
68
69 # then merge tools
69 # then merge tools
70 tools = {}
70 tools = {}
71 for k, v in ui.configitems("merge-tools"):
71 for k, v in ui.configitems("merge-tools"):
72 t = k.split('.')[0]
72 t = k.split('.')[0]
73 if t not in tools:
73 if t not in tools:
74 tools[t] = int(_toolstr(ui, t, "priority", "0"))
74 tools[t] = int(_toolstr(ui, t, "priority", "0"))
75 names = tools.keys()
75 names = tools.keys()
76 tools = sorted([(-p, t) for t, p in tools.items()])
76 tools = sorted([(-p, t) for t, p in tools.items()])
77 uimerge = ui.config("ui", "merge")
77 uimerge = ui.config("ui", "merge")
78 if uimerge:
78 if uimerge:
79 if uimerge not in names:
79 if uimerge not in names:
80 return (uimerge, uimerge)
80 return (uimerge, uimerge)
81 tools.insert(0, (None, uimerge)) # highest priority
81 tools.insert(0, (None, uimerge)) # highest priority
82 tools.append((None, "hgmerge")) # the old default, if found
82 tools.append((None, "hgmerge")) # the old default, if found
83 for p, t in tools:
83 for p, t in tools:
84 if check(t, None, symlink, binary):
84 if check(t, None, symlink, binary):
85 toolpath = _findtool(ui, t)
85 toolpath = _findtool(ui, t)
86 return (t, '"' + toolpath + '"')
86 return (t, '"' + toolpath + '"')
87 # internal merge as last resort
87 # internal merge as last resort
88 return (not (symlink or binary) and "internal:merge" or None, None)
88 return (not (symlink or binary) and "internal:merge" or None, None)
89
89
90 def _eoltype(data):
90 def _eoltype(data):
91 "Guess the EOL type of a file"
91 "Guess the EOL type of a file"
92 if '\0' in data: # binary
92 if '\0' in data: # binary
93 return None
93 return None
94 if '\r\n' in data: # Windows
94 if '\r\n' in data: # Windows
95 return '\r\n'
95 return '\r\n'
96 if '\r' in data: # Old Mac
96 if '\r' in data: # Old Mac
97 return '\r'
97 return '\r'
98 if '\n' in data: # UNIX
98 if '\n' in data: # UNIX
99 return '\n'
99 return '\n'
100 return None # unknown
100 return None # unknown
101
101
102 def _matcheol(file, origfile):
102 def _matcheol(file, origfile):
103 "Convert EOL markers in a file to match origfile"
103 "Convert EOL markers in a file to match origfile"
104 tostyle = _eoltype(open(origfile, "rb").read())
104 tostyle = _eoltype(open(origfile, "rb").read())
105 if tostyle:
105 if tostyle:
106 data = open(file, "rb").read()
106 data = open(file, "rb").read()
107 style = _eoltype(data)
107 style = _eoltype(data)
108 if style:
108 if style:
109 newdata = data.replace(style, tostyle)
109 newdata = data.replace(style, tostyle)
110 if newdata != data:
110 if newdata != data:
111 open(file, "wb").write(newdata)
111 open(file, "wb").write(newdata)
112
112
113 def filemerge(repo, mynode, orig, fcd, fco, fca):
113 def filemerge(repo, mynode, orig, fcd, fco, fca):
114 """perform a 3-way merge in the working directory
114 """perform a 3-way merge in the working directory
115
115
116 mynode = parent node before merge
116 mynode = parent node before merge
117 orig = original local filename before merge
117 orig = original local filename before merge
118 fco = other file context
118 fco = other file context
119 fca = ancestor file context
119 fca = ancestor file context
120 fcd = local file context for current/destination file
120 fcd = local file context for current/destination file
121 """
121 """
122
122
123 def temp(prefix, ctx):
123 def temp(prefix, ctx):
124 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
124 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
125 (fd, name) = tempfile.mkstemp(prefix=pre)
125 (fd, name) = tempfile.mkstemp(prefix=pre)
126 data = repo.wwritedata(ctx.path(), ctx.data())
126 data = repo.wwritedata(ctx.path(), ctx.data())
127 f = os.fdopen(fd, "wb")
127 f = os.fdopen(fd, "wb")
128 f.write(data)
128 f.write(data)
129 f.close()
129 f.close()
130 return name
130 return name
131
131
132 def isbin(ctx):
132 def isbin(ctx):
133 try:
133 try:
134 return util.binary(ctx.data())
134 return util.binary(ctx.data())
135 except IOError:
135 except IOError:
136 return False
136 return False
137
137
138 if not fco.cmp(fcd.data()): # files identical?
138 if not fco.cmp(fcd): # files identical?
139 return None
139 return None
140
140
141 if fca == fco: # backwards, use working dir parent as ancestor
141 if fca == fco: # backwards, use working dir parent as ancestor
142 fca = fcd.parents()[0]
142 fca = fcd.parents()[0]
143
143
144 ui = repo.ui
144 ui = repo.ui
145 fd = fcd.path()
145 fd = fcd.path()
146 binary = isbin(fcd) or isbin(fco) or isbin(fca)
146 binary = isbin(fcd) or isbin(fco) or isbin(fca)
147 symlink = 'l' in fcd.flags() + fco.flags()
147 symlink = 'l' in fcd.flags() + fco.flags()
148 tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
148 tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
149 ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" %
149 ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" %
150 (tool, fd, binary, symlink))
150 (tool, fd, binary, symlink))
151
151
152 if not tool or tool == 'internal:prompt':
152 if not tool or tool == 'internal:prompt':
153 tool = "internal:local"
153 tool = "internal:local"
154 if ui.promptchoice(_(" no tool found to merge %s\n"
154 if ui.promptchoice(_(" no tool found to merge %s\n"
155 "keep (l)ocal or take (o)ther?") % fd,
155 "keep (l)ocal or take (o)ther?") % fd,
156 (_("&Local"), _("&Other")), 0):
156 (_("&Local"), _("&Other")), 0):
157 tool = "internal:other"
157 tool = "internal:other"
158 if tool == "internal:local":
158 if tool == "internal:local":
159 return 0
159 return 0
160 if tool == "internal:other":
160 if tool == "internal:other":
161 repo.wwrite(fd, fco.data(), fco.flags())
161 repo.wwrite(fd, fco.data(), fco.flags())
162 return 0
162 return 0
163 if tool == "internal:fail":
163 if tool == "internal:fail":
164 return 1
164 return 1
165
165
166 # do the actual merge
166 # do the actual merge
167 a = repo.wjoin(fd)
167 a = repo.wjoin(fd)
168 b = temp("base", fca)
168 b = temp("base", fca)
169 c = temp("other", fco)
169 c = temp("other", fco)
170 out = ""
170 out = ""
171 back = a + ".orig"
171 back = a + ".orig"
172 util.copyfile(a, back)
172 util.copyfile(a, back)
173
173
174 if orig != fco.path():
174 if orig != fco.path():
175 ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
175 ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
176 else:
176 else:
177 ui.status(_("merging %s\n") % fd)
177 ui.status(_("merging %s\n") % fd)
178
178
179 ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))
179 ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))
180
180
181 # do we attempt to simplemerge first?
181 # do we attempt to simplemerge first?
182 try:
182 try:
183 premerge = _toolbool(ui, tool, "premerge", not (binary or symlink))
183 premerge = _toolbool(ui, tool, "premerge", not (binary or symlink))
184 except error.ConfigError:
184 except error.ConfigError:
185 premerge = _toolstr(ui, tool, "premerge").lower()
185 premerge = _toolstr(ui, tool, "premerge").lower()
186 valid = 'keep'.split()
186 valid = 'keep'.split()
187 if premerge not in valid:
187 if premerge not in valid:
188 _valid = ', '.join(["'" + v + "'" for v in valid])
188 _valid = ', '.join(["'" + v + "'" for v in valid])
189 raise error.ConfigError(_("%s.premerge not valid "
189 raise error.ConfigError(_("%s.premerge not valid "
190 "('%s' is neither boolean nor %s)") %
190 "('%s' is neither boolean nor %s)") %
191 (tool, premerge, _valid))
191 (tool, premerge, _valid))
192
192
193 if premerge:
193 if premerge:
194 r = simplemerge.simplemerge(ui, a, b, c, quiet=True)
194 r = simplemerge.simplemerge(ui, a, b, c, quiet=True)
195 if not r:
195 if not r:
196 ui.debug(" premerge successful\n")
196 ui.debug(" premerge successful\n")
197 os.unlink(back)
197 os.unlink(back)
198 os.unlink(b)
198 os.unlink(b)
199 os.unlink(c)
199 os.unlink(c)
200 return 0
200 return 0
201 if premerge != 'keep':
201 if premerge != 'keep':
202 util.copyfile(back, a) # restore from backup and try again
202 util.copyfile(back, a) # restore from backup and try again
203
203
204 env = dict(HG_FILE=fd,
204 env = dict(HG_FILE=fd,
205 HG_MY_NODE=short(mynode),
205 HG_MY_NODE=short(mynode),
206 HG_OTHER_NODE=str(fco.changectx()),
206 HG_OTHER_NODE=str(fco.changectx()),
207 HG_BASE_NODE=str(fca.changectx()),
207 HG_BASE_NODE=str(fca.changectx()),
208 HG_MY_ISLINK='l' in fcd.flags(),
208 HG_MY_ISLINK='l' in fcd.flags(),
209 HG_OTHER_ISLINK='l' in fco.flags(),
209 HG_OTHER_ISLINK='l' in fco.flags(),
210 HG_BASE_ISLINK='l' in fca.flags())
210 HG_BASE_ISLINK='l' in fca.flags())
211
211
212 if tool == "internal:merge":
212 if tool == "internal:merge":
213 r = simplemerge.simplemerge(ui, a, b, c, label=['local', 'other'])
213 r = simplemerge.simplemerge(ui, a, b, c, label=['local', 'other'])
214 elif tool == 'internal:dump':
214 elif tool == 'internal:dump':
215 a = repo.wjoin(fd)
215 a = repo.wjoin(fd)
216 util.copyfile(a, a + ".local")
216 util.copyfile(a, a + ".local")
217 repo.wwrite(fd + ".other", fco.data(), fco.flags())
217 repo.wwrite(fd + ".other", fco.data(), fco.flags())
218 repo.wwrite(fd + ".base", fca.data(), fca.flags())
218 repo.wwrite(fd + ".base", fca.data(), fca.flags())
219 return 1 # unresolved
219 return 1 # unresolved
220 else:
220 else:
221 args = _toolstr(ui, tool, "args", '$local $base $other')
221 args = _toolstr(ui, tool, "args", '$local $base $other')
222 if "$output" in args:
222 if "$output" in args:
223 out, a = a, back # read input from backup, write to original
223 out, a = a, back # read input from backup, write to original
224 replace = dict(local=a, base=b, other=c, output=out)
224 replace = dict(local=a, base=b, other=c, output=out)
225 args = re.sub("\$(local|base|other|output)",
225 args = re.sub("\$(local|base|other|output)",
226 lambda x: '"%s"' % util.localpath(replace[x.group()[1:]]), args)
226 lambda x: '"%s"' % util.localpath(replace[x.group()[1:]]), args)
227 r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env)
227 r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env)
228
228
229 if not r and (_toolbool(ui, tool, "checkconflicts") or
229 if not r and (_toolbool(ui, tool, "checkconflicts") or
230 'conflicts' in _toollist(ui, tool, "check")):
230 'conflicts' in _toollist(ui, tool, "check")):
231 if re.match("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data()):
231 if re.match("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data()):
232 r = 1
232 r = 1
233
233
234 checked = False
234 checked = False
235 if 'prompt' in _toollist(ui, tool, "check"):
235 if 'prompt' in _toollist(ui, tool, "check"):
236 checked = True
236 checked = True
237 if ui.promptchoice(_("was merge of '%s' successful (yn)?") % fd,
237 if ui.promptchoice(_("was merge of '%s' successful (yn)?") % fd,
238 (_("&Yes"), _("&No")), 1):
238 (_("&Yes"), _("&No")), 1):
239 r = 1
239 r = 1
240
240
241 if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
241 if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
242 'changed' in _toollist(ui, tool, "check")):
242 'changed' in _toollist(ui, tool, "check")):
243 if filecmp.cmp(repo.wjoin(fd), back):
243 if filecmp.cmp(repo.wjoin(fd), back):
244 if ui.promptchoice(_(" output file %s appears unchanged\n"
244 if ui.promptchoice(_(" output file %s appears unchanged\n"
245 "was merge successful (yn)?") % fd,
245 "was merge successful (yn)?") % fd,
246 (_("&Yes"), _("&No")), 1):
246 (_("&Yes"), _("&No")), 1):
247 r = 1
247 r = 1
248
248
249 if _toolbool(ui, tool, "fixeol"):
249 if _toolbool(ui, tool, "fixeol"):
250 _matcheol(repo.wjoin(fd), back)
250 _matcheol(repo.wjoin(fd), back)
251
251
252 if r:
252 if r:
253 ui.warn(_("merging %s failed!\n") % fd)
253 ui.warn(_("merging %s failed!\n") % fd)
254 else:
254 else:
255 os.unlink(back)
255 os.unlink(back)
256
256
257 os.unlink(b)
257 os.unlink(b)
258 os.unlink(c)
258 os.unlink(c)
259 return r
259 return r
@@ -1,166 +1,169 b''
1 Mercurial supports a functional language for selecting a set of
1 Mercurial supports a functional language for selecting a set of
2 revisions.
2 revisions.
3
3
4 The language supports a number of predicates which are joined by infix
4 The language supports a number of predicates which are joined by infix
5 operators. Parenthesis can be used for grouping.
5 operators. Parenthesis can be used for grouping.
6
6
7 Identifiers such as branch names must be quoted with single or double
7 Identifiers such as branch names must be quoted with single or double
8 quotes if they contain characters outside of
8 quotes if they contain characters outside of
9 ``[._a-zA-Z0-9\x80-\xff]`` or if they match one of the predefined
9 ``[._a-zA-Z0-9\x80-\xff]`` or if they match one of the predefined
10 predicates. Special characters can be used in quoted identifiers by
10 predicates. Special characters can be used in quoted identifiers by
11 escaping them, e.g., ``\n`` is interpreted as a newline.
11 escaping them, e.g., ``\n`` is interpreted as a newline.
12
12
13 There is a single prefix operator:
13 There is a single prefix operator:
14
14
15 ``not x``
15 ``not x``
16 Changesets not in x. Short form is ``! x``.
16 Changesets not in x. Short form is ``! x``.
17
17
18 These are the supported infix operators:
18 These are the supported infix operators:
19
19
20 ``x::y``
20 ``x::y``
21 A DAG range, meaning all changesets that are descendants of x and
21 A DAG range, meaning all changesets that are descendants of x and
22 ancestors of y, including x and y themselves. If the first endpoint
22 ancestors of y, including x and y themselves. If the first endpoint
23 is left out, this is equivalent to ``ancestors(y)``, if the second
23 is left out, this is equivalent to ``ancestors(y)``, if the second
24 is left out it is equivalent to ``descendants(x)``.
24 is left out it is equivalent to ``descendants(x)``.
25
25
26 An alternative syntax is ``x..y``.
26 An alternative syntax is ``x..y``.
27
27
28 ``x:y``
28 ``x:y``
29 All changesets with revision numbers between x and y, both
29 All changesets with revision numbers between x and y, both
30 inclusive. Either endpoint can be left out, they default to 0 and
30 inclusive. Either endpoint can be left out, they default to 0 and
31 tip.
31 tip.
32
32
33 ``x and y``
33 ``x and y``
34 The intersection of changesets in x and y. Short form is ``x & y``.
34 The intersection of changesets in x and y. Short form is ``x & y``.
35
35
36 ``x or y``
36 ``x or y``
37 The union of changesets in x and y. There are two alternative short
37 The union of changesets in x and y. There are two alternative short
38 forms: ``x | y`` and ``x + y``.
38 forms: ``x | y`` and ``x + y``.
39
39
40 ``x - y``
40 ``x - y``
41 Changesets in x but not in y.
41 Changesets in x but not in y.
42
42
43 The following predicates are supported:
43 The following predicates are supported:
44
44
45 ``adds(pattern)``
45 ``adds(pattern)``
46 Changesets that add a file matching pattern.
46 Changesets that add a file matching pattern.
47
47
48 ``all()``
48 ``all()``
49 All changesets, the same as ``0:tip``.
49 All changesets, the same as ``0:tip``.
50
50
51 ``ancestor(single, single)``
51 ``ancestor(single, single)``
52 Greatest common ancestor of the two changesets.
52 Greatest common ancestor of the two changesets.
53
53
54 ``ancestors(set)``
54 ``ancestors(set)``
55 Changesets that are ancestors of a changeset in set.
55 Changesets that are ancestors of a changeset in set.
56
56
57 ``author(string)``
57 ``author(string)``
58 Alias for ``user(string)``.
58 Alias for ``user(string)``.
59
59
60 ``branch(set)``
60 ``branch(set)``
61 All changesets belonging to the branches of changesets in set.
61 All changesets belonging to the branches of changesets in set.
62
62
63 ``children(set)``
63 ``children(set)``
64 Child changesets of changesets in set.
64 Child changesets of changesets in set.
65
65
66 ``closed()``
66 ``closed()``
67 Changeset is closed.
67 Changeset is closed.
68
68
69 ``contains(pattern)``
69 ``contains(pattern)``
70 Revision contains pattern.
70 Revision contains pattern.
71
71
72 ``date(interval)``
72 ``date(interval)``
73 Changesets within the interval, see :hg:`help dates`.
73 Changesets within the interval, see :hg:`help dates`.
74
74
75 ``descendants(set)``
75 ``descendants(set)``
76 Changesets which are descendants of changesets in set.
76 Changesets which are descendants of changesets in set.
77
77
78 ``file(pattern)``
78 ``file(pattern)``
79 Changesets affecting files matched by pattern.
79 Changesets affecting files matched by pattern.
80
80
81 ``follow()``
81 ``follow()``
82 An alias for ``::.`` (ancestors of the working copy's first parent).
82 An alias for ``::.`` (ancestors of the working copy's first parent).
83
83
84 ``grep(regex)``
84 ``grep(regex)``
85 Like ``keyword(string)`` but accepts a regex.
85 Like ``keyword(string)`` but accepts a regex.
86
86
87 ``head()``
87 ``head()``
88 Changeset is a head.
88 Changeset is a head.
89
89
90 ``heads(set)``
90 ``heads(set)``
91 Members of set with no children in set.
91 Members of set with no children in set.
92
92
93 ``keyword(string)``
93 ``keyword(string)``
94 Search commit message, user name, and names of changed files for
94 Search commit message, user name, and names of changed files for
95 string.
95 string.
96
96
97 ``limit(set, n)``
97 ``limit(set, n)``
98 First n members of set.
98 First n members of set.
99
99
100 ``max(set)``
100 ``max(set)``
101 Changeset with highest revision number in set.
101 Changeset with highest revision number in set.
102
102
103 ``min(set)``
104 Changeset with lowest revision number in set.
105
103 ``merge()``
106 ``merge()``
104 Changeset is a merge changeset.
107 Changeset is a merge changeset.
105
108
106 ``modifies(pattern)``
109 ``modifies(pattern)``
107 Changesets modifying files matched by pattern.
110 Changesets modifying files matched by pattern.
108
111
109 ``outgoing([path])``
112 ``outgoing([path])``
110 Changesets not found in the specified destination repository, or the
113 Changesets not found in the specified destination repository, or the
111 default push location.
114 default push location.
112
115
113 ``p1(set)``
116 ``p1(set)``
114 First parent of changesets in set.
117 First parent of changesets in set.
115
118
116 ``p2(set)``
119 ``p2(set)``
117 Second parent of changesets in set.
120 Second parent of changesets in set.
118
121
119 ``parents(set)``
122 ``parents(set)``
120 The set of all parents for all changesets in set.
123 The set of all parents for all changesets in set.
121
124
122 ``removes(pattern)``
125 ``removes(pattern)``
123 Changesets which remove files matching pattern.
126 Changesets which remove files matching pattern.
124
127
125 ``reverse(set)``
128 ``reverse(set)``
126 Reverse order of set.
129 Reverse order of set.
127
130
128 ``roots(set)``
131 ``roots(set)``
129 Changesets with no parent changeset in set.
132 Changesets with no parent changeset in set.
130
133
131 ``sort(set[, [-]key...])``
134 ``sort(set[, [-]key...])``
132 Sort set by keys. The default sort order is ascending, specify a key
135 Sort set by keys. The default sort order is ascending, specify a key
133 as ``-key`` to sort in descending order.
136 as ``-key`` to sort in descending order.
134
137
135 The keys can be:
138 The keys can be:
136
139
137 - ``rev`` for the revision number,
140 - ``rev`` for the revision number,
138 - ``branch`` for the branch name,
141 - ``branch`` for the branch name,
139 - ``desc`` for the commit message (description),
142 - ``desc`` for the commit message (description),
140 - ``user`` for user name (``author`` can be used as an alias),
143 - ``user`` for user name (``author`` can be used as an alias),
141 - ``date`` for the commit date
144 - ``date`` for the commit date
142
145
143 ``tagged()``
146 ``tagged()``
144 Changeset is tagged.
147 Changeset is tagged.
145
148
146 ``user(string)``
149 ``user(string)``
147 User name is string.
150 User name is string.
148
151
149 Command line equivalents for :hg:`log`::
152 Command line equivalents for :hg:`log`::
150
153
151 -f -> ::.
154 -f -> ::.
152 -d x -> date(x)
155 -d x -> date(x)
153 -k x -> keyword(x)
156 -k x -> keyword(x)
154 -m -> merge()
157 -m -> merge()
155 -u x -> user(x)
158 -u x -> user(x)
156 -b x -> branch(x)
159 -b x -> branch(x)
157 -P x -> !::x
160 -P x -> !::x
158 -l x -> limit(expr, x)
161 -l x -> limit(expr, x)
159
162
160 Some sample queries::
163 Some sample queries::
161
164
162 hg log -r 'branch(default)'
165 hg log -r 'branch(default)'
163 hg log -r 'branch(default) and 1.5:: and not merge()'
166 hg log -r 'branch(default) and 1.5:: and not merge()'
164 hg log -r '1.3::1.5 and keyword(bug) and file("hgext/*")'
167 hg log -r '1.3::1.5 and keyword(bug) and file("hgext/*")'
165 hg log -r 'sort(date("May 2008"), user)'
168 hg log -r 'sort(date("May 2008"), user)'
166 hg log -r '(keyword(bug) or keyword(issue)) and not ancestors(tagged())'
169 hg log -r '(keyword(bug) or keyword(issue)) and not ancestors(tagged())'
@@ -1,1799 +1,1802 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supported = set('revlogv1 store fncache shared'.split())
24 supported = set('revlogv1 store fncache shared'.split())
25
25
26 def __init__(self, baseui, path=None, create=0):
26 def __init__(self, baseui, path=None, create=0):
27 repo.repository.__init__(self)
27 repo.repository.__init__(self)
28 self.root = os.path.realpath(util.expandpath(path))
28 self.root = os.path.realpath(util.expandpath(path))
29 self.path = os.path.join(self.root, ".hg")
29 self.path = os.path.join(self.root, ".hg")
30 self.origroot = path
30 self.origroot = path
31 self.opener = util.opener(self.path)
31 self.opener = util.opener(self.path)
32 self.wopener = util.opener(self.root)
32 self.wopener = util.opener(self.root)
33 self.baseui = baseui
33 self.baseui = baseui
34 self.ui = baseui.copy()
34 self.ui = baseui.copy()
35
35
36 try:
36 try:
37 self.ui.readconfig(self.join("hgrc"), self.root)
37 self.ui.readconfig(self.join("hgrc"), self.root)
38 extensions.loadall(self.ui)
38 extensions.loadall(self.ui)
39 except IOError:
39 except IOError:
40 pass
40 pass
41
41
42 if not os.path.isdir(self.path):
42 if not os.path.isdir(self.path):
43 if create:
43 if create:
44 if not os.path.exists(path):
44 if not os.path.exists(path):
45 util.makedirs(path)
45 util.makedirs(path)
46 os.mkdir(self.path)
46 os.mkdir(self.path)
47 requirements = ["revlogv1"]
47 requirements = ["revlogv1"]
48 if self.ui.configbool('format', 'usestore', True):
48 if self.ui.configbool('format', 'usestore', True):
49 os.mkdir(os.path.join(self.path, "store"))
49 os.mkdir(os.path.join(self.path, "store"))
50 requirements.append("store")
50 requirements.append("store")
51 if self.ui.configbool('format', 'usefncache', True):
51 if self.ui.configbool('format', 'usefncache', True):
52 requirements.append("fncache")
52 requirements.append("fncache")
53 # create an invalid changelog
53 # create an invalid changelog
54 self.opener("00changelog.i", "a").write(
54 self.opener("00changelog.i", "a").write(
55 '\0\0\0\2' # represents revlogv2
55 '\0\0\0\2' # represents revlogv2
56 ' dummy changelog to prevent using the old repo layout'
56 ' dummy changelog to prevent using the old repo layout'
57 )
57 )
58 reqfile = self.opener("requires", "w")
58 reqfile = self.opener("requires", "w")
59 for r in requirements:
59 for r in requirements:
60 reqfile.write("%s\n" % r)
60 reqfile.write("%s\n" % r)
61 reqfile.close()
61 reqfile.close()
62 else:
62 else:
63 raise error.RepoError(_("repository %s not found") % path)
63 raise error.RepoError(_("repository %s not found") % path)
64 elif create:
64 elif create:
65 raise error.RepoError(_("repository %s already exists") % path)
65 raise error.RepoError(_("repository %s already exists") % path)
66 else:
66 else:
67 # find requirements
67 # find requirements
68 requirements = set()
68 requirements = set()
69 try:
69 try:
70 requirements = set(self.opener("requires").read().splitlines())
70 requirements = set(self.opener("requires").read().splitlines())
71 except IOError, inst:
71 except IOError, inst:
72 if inst.errno != errno.ENOENT:
72 if inst.errno != errno.ENOENT:
73 raise
73 raise
74 for r in requirements - self.supported:
74 for r in requirements - self.supported:
75 raise error.RepoError(_("requirement '%s' not supported") % r)
75 raise error.RepoError(_("requirement '%s' not supported") % r)
76
76
77 self.sharedpath = self.path
77 self.sharedpath = self.path
78 try:
78 try:
79 s = os.path.realpath(self.opener("sharedpath").read())
79 s = os.path.realpath(self.opener("sharedpath").read())
80 if not os.path.exists(s):
80 if not os.path.exists(s):
81 raise error.RepoError(
81 raise error.RepoError(
82 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 _('.hg/sharedpath points to nonexistent directory %s') % s)
83 self.sharedpath = s
83 self.sharedpath = s
84 except IOError, inst:
84 except IOError, inst:
85 if inst.errno != errno.ENOENT:
85 if inst.errno != errno.ENOENT:
86 raise
86 raise
87
87
88 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.store = store.store(requirements, self.sharedpath, util.opener)
89 self.spath = self.store.path
89 self.spath = self.store.path
90 self.sopener = self.store.opener
90 self.sopener = self.store.opener
91 self.sjoin = self.store.join
91 self.sjoin = self.store.join
92 self.opener.createmode = self.store.createmode
92 self.opener.createmode = self.store.createmode
93 self.sopener.options = {}
93 self.sopener.options = {}
94
94
95 # These two define the set of tags for this repository. _tags
95 # These two define the set of tags for this repository. _tags
96 # maps tag name to node; _tagtypes maps tag name to 'global' or
96 # maps tag name to node; _tagtypes maps tag name to 'global' or
97 # 'local'. (Global tags are defined by .hgtags across all
97 # 'local'. (Global tags are defined by .hgtags across all
98 # heads, and local tags are defined in .hg/localtags.) They
98 # heads, and local tags are defined in .hg/localtags.) They
99 # constitute the in-memory cache of tags.
99 # constitute the in-memory cache of tags.
100 self._tags = None
100 self._tags = None
101 self._tagtypes = None
101 self._tagtypes = None
102
102
103 self._branchcache = None # in UTF-8
103 self._branchcache = None # in UTF-8
104 self._branchcachetip = None
104 self._branchcachetip = None
105 self.nodetagscache = None
105 self.nodetagscache = None
106 self.filterpats = {}
106 self.filterpats = {}
107 self._datafilters = {}
107 self._datafilters = {}
108 self._transref = self._lockref = self._wlockref = None
108 self._transref = self._lockref = self._wlockref = None
109
109
110 @propertycache
110 @propertycache
111 def changelog(self):
111 def changelog(self):
112 c = changelog.changelog(self.sopener)
112 c = changelog.changelog(self.sopener)
113 if 'HG_PENDING' in os.environ:
113 if 'HG_PENDING' in os.environ:
114 p = os.environ['HG_PENDING']
114 p = os.environ['HG_PENDING']
115 if p.startswith(self.root):
115 if p.startswith(self.root):
116 c.readpending('00changelog.i.a')
116 c.readpending('00changelog.i.a')
117 self.sopener.options['defversion'] = c.version
117 self.sopener.options['defversion'] = c.version
118 return c
118 return c
119
119
120 @propertycache
120 @propertycache
121 def manifest(self):
121 def manifest(self):
122 return manifest.manifest(self.sopener)
122 return manifest.manifest(self.sopener)
123
123
124 @propertycache
124 @propertycache
125 def dirstate(self):
125 def dirstate(self):
126 return dirstate.dirstate(self.opener, self.ui, self.root)
126 return dirstate.dirstate(self.opener, self.ui, self.root)
127
127
128 def __getitem__(self, changeid):
128 def __getitem__(self, changeid):
129 if changeid is None:
129 if changeid is None:
130 return context.workingctx(self)
130 return context.workingctx(self)
131 return context.changectx(self, changeid)
131 return context.changectx(self, changeid)
132
132
133 def __contains__(self, changeid):
133 def __contains__(self, changeid):
134 try:
134 try:
135 return bool(self.lookup(changeid))
135 return bool(self.lookup(changeid))
136 except error.RepoLookupError:
136 except error.RepoLookupError:
137 return False
137 return False
138
138
139 def __nonzero__(self):
139 def __nonzero__(self):
140 return True
140 return True
141
141
142 def __len__(self):
142 def __len__(self):
143 return len(self.changelog)
143 return len(self.changelog)
144
144
145 def __iter__(self):
145 def __iter__(self):
146 for i in xrange(len(self)):
146 for i in xrange(len(self)):
147 yield i
147 yield i
148
148
149 def url(self):
149 def url(self):
150 return 'file:' + self.root
150 return 'file:' + self.root
151
151
152 def hook(self, name, throw=False, **args):
152 def hook(self, name, throw=False, **args):
153 return hook.hook(self.ui, self, name, throw, **args)
153 return hook.hook(self.ui, self, name, throw, **args)
154
154
155 tag_disallowed = ':\r\n'
155 tag_disallowed = ':\r\n'
156
156
157 def _tag(self, names, node, message, local, user, date, extra={}):
157 def _tag(self, names, node, message, local, user, date, extra={}):
158 if isinstance(names, str):
158 if isinstance(names, str):
159 allchars = names
159 allchars = names
160 names = (names,)
160 names = (names,)
161 else:
161 else:
162 allchars = ''.join(names)
162 allchars = ''.join(names)
163 for c in self.tag_disallowed:
163 for c in self.tag_disallowed:
164 if c in allchars:
164 if c in allchars:
165 raise util.Abort(_('%r cannot be used in a tag name') % c)
165 raise util.Abort(_('%r cannot be used in a tag name') % c)
166
166
167 branches = self.branchmap()
167 branches = self.branchmap()
168 for name in names:
168 for name in names:
169 self.hook('pretag', throw=True, node=hex(node), tag=name,
169 self.hook('pretag', throw=True, node=hex(node), tag=name,
170 local=local)
170 local=local)
171 if name in branches:
171 if name in branches:
172 self.ui.warn(_("warning: tag %s conflicts with existing"
172 self.ui.warn(_("warning: tag %s conflicts with existing"
173 " branch name\n") % name)
173 " branch name\n") % name)
174
174
175 def writetags(fp, names, munge, prevtags):
175 def writetags(fp, names, munge, prevtags):
176 fp.seek(0, 2)
176 fp.seek(0, 2)
177 if prevtags and prevtags[-1] != '\n':
177 if prevtags and prevtags[-1] != '\n':
178 fp.write('\n')
178 fp.write('\n')
179 for name in names:
179 for name in names:
180 m = munge and munge(name) or name
180 m = munge and munge(name) or name
181 if self._tagtypes and name in self._tagtypes:
181 if self._tagtypes and name in self._tagtypes:
182 old = self._tags.get(name, nullid)
182 old = self._tags.get(name, nullid)
183 fp.write('%s %s\n' % (hex(old), m))
183 fp.write('%s %s\n' % (hex(old), m))
184 fp.write('%s %s\n' % (hex(node), m))
184 fp.write('%s %s\n' % (hex(node), m))
185 fp.close()
185 fp.close()
186
186
187 prevtags = ''
187 prevtags = ''
188 if local:
188 if local:
189 try:
189 try:
190 fp = self.opener('localtags', 'r+')
190 fp = self.opener('localtags', 'r+')
191 except IOError:
191 except IOError:
192 fp = self.opener('localtags', 'a')
192 fp = self.opener('localtags', 'a')
193 else:
193 else:
194 prevtags = fp.read()
194 prevtags = fp.read()
195
195
196 # local tags are stored in the current charset
196 # local tags are stored in the current charset
197 writetags(fp, names, None, prevtags)
197 writetags(fp, names, None, prevtags)
198 for name in names:
198 for name in names:
199 self.hook('tag', node=hex(node), tag=name, local=local)
199 self.hook('tag', node=hex(node), tag=name, local=local)
200 return
200 return
201
201
202 try:
202 try:
203 fp = self.wfile('.hgtags', 'rb+')
203 fp = self.wfile('.hgtags', 'rb+')
204 except IOError:
204 except IOError:
205 fp = self.wfile('.hgtags', 'ab')
205 fp = self.wfile('.hgtags', 'ab')
206 else:
206 else:
207 prevtags = fp.read()
207 prevtags = fp.read()
208
208
209 # committed tags are stored in UTF-8
209 # committed tags are stored in UTF-8
210 writetags(fp, names, encoding.fromlocal, prevtags)
210 writetags(fp, names, encoding.fromlocal, prevtags)
211
211
212 if '.hgtags' not in self.dirstate:
212 if '.hgtags' not in self.dirstate:
213 self[None].add(['.hgtags'])
213 self[None].add(['.hgtags'])
214
214
215 m = matchmod.exact(self.root, '', ['.hgtags'])
215 m = matchmod.exact(self.root, '', ['.hgtags'])
216 tagnode = self.commit(message, user, date, extra=extra, match=m)
216 tagnode = self.commit(message, user, date, extra=extra, match=m)
217
217
218 for name in names:
218 for name in names:
219 self.hook('tag', node=hex(node), tag=name, local=local)
219 self.hook('tag', node=hex(node), tag=name, local=local)
220
220
221 return tagnode
221 return tagnode
222
222
223 def tag(self, names, node, message, local, user, date):
223 def tag(self, names, node, message, local, user, date):
224 '''tag a revision with one or more symbolic names.
224 '''tag a revision with one or more symbolic names.
225
225
226 names is a list of strings or, when adding a single tag, names may be a
226 names is a list of strings or, when adding a single tag, names may be a
227 string.
227 string.
228
228
229 if local is True, the tags are stored in a per-repository file.
229 if local is True, the tags are stored in a per-repository file.
230 otherwise, they are stored in the .hgtags file, and a new
230 otherwise, they are stored in the .hgtags file, and a new
231 changeset is committed with the change.
231 changeset is committed with the change.
232
232
233 keyword arguments:
233 keyword arguments:
234
234
235 local: whether to store tags in non-version-controlled file
235 local: whether to store tags in non-version-controlled file
236 (default False)
236 (default False)
237
237
238 message: commit message to use if committing
238 message: commit message to use if committing
239
239
240 user: name of user to use if committing
240 user: name of user to use if committing
241
241
242 date: date tuple to use if committing'''
242 date: date tuple to use if committing'''
243
243
244 for x in self.status()[:5]:
244 for x in self.status()[:5]:
245 if '.hgtags' in x:
245 if '.hgtags' in x:
246 raise util.Abort(_('working copy of .hgtags is changed '
246 raise util.Abort(_('working copy of .hgtags is changed '
247 '(please commit .hgtags manually)'))
247 '(please commit .hgtags manually)'))
248
248
249 self.tags() # instantiate the cache
249 self.tags() # instantiate the cache
250 self._tag(names, node, message, local, user, date)
250 self._tag(names, node, message, local, user, date)
251
251
252 def tags(self):
252 def tags(self):
253 '''return a mapping of tag to node'''
253 '''return a mapping of tag to node'''
254 if self._tags is None:
254 if self._tags is None:
255 (self._tags, self._tagtypes) = self._findtags()
255 (self._tags, self._tagtypes) = self._findtags()
256
256
257 return self._tags
257 return self._tags
258
258
259 def _findtags(self):
259 def _findtags(self):
260 '''Do the hard work of finding tags. Return a pair of dicts
260 '''Do the hard work of finding tags. Return a pair of dicts
261 (tags, tagtypes) where tags maps tag name to node, and tagtypes
261 (tags, tagtypes) where tags maps tag name to node, and tagtypes
262 maps tag name to a string like \'global\' or \'local\'.
262 maps tag name to a string like \'global\' or \'local\'.
263 Subclasses or extensions are free to add their own tags, but
263 Subclasses or extensions are free to add their own tags, but
264 should be aware that the returned dicts will be retained for the
264 should be aware that the returned dicts will be retained for the
265 duration of the localrepo object.'''
265 duration of the localrepo object.'''
266
266
267 # XXX what tagtype should subclasses/extensions use? Currently
267 # XXX what tagtype should subclasses/extensions use? Currently
268 # mq and bookmarks add tags, but do not set the tagtype at all.
268 # mq and bookmarks add tags, but do not set the tagtype at all.
269 # Should each extension invent its own tag type? Should there
269 # Should each extension invent its own tag type? Should there
270 # be one tagtype for all such "virtual" tags? Or is the status
270 # be one tagtype for all such "virtual" tags? Or is the status
271 # quo fine?
271 # quo fine?
272
272
273 alltags = {} # map tag name to (node, hist)
273 alltags = {} # map tag name to (node, hist)
274 tagtypes = {}
274 tagtypes = {}
275
275
276 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
276 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
277 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
277 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
278
278
279 # Build the return dicts. Have to re-encode tag names because
279 # Build the return dicts. Have to re-encode tag names because
280 # the tags module always uses UTF-8 (in order not to lose info
280 # the tags module always uses UTF-8 (in order not to lose info
281 # writing to the cache), but the rest of Mercurial wants them in
281 # writing to the cache), but the rest of Mercurial wants them in
282 # local encoding.
282 # local encoding.
283 tags = {}
283 tags = {}
284 for (name, (node, hist)) in alltags.iteritems():
284 for (name, (node, hist)) in alltags.iteritems():
285 if node != nullid:
285 if node != nullid:
286 tags[encoding.tolocal(name)] = node
286 tags[encoding.tolocal(name)] = node
287 tags['tip'] = self.changelog.tip()
287 tags['tip'] = self.changelog.tip()
288 tagtypes = dict([(encoding.tolocal(name), value)
288 tagtypes = dict([(encoding.tolocal(name), value)
289 for (name, value) in tagtypes.iteritems()])
289 for (name, value) in tagtypes.iteritems()])
290 return (tags, tagtypes)
290 return (tags, tagtypes)
291
291
292 def tagtype(self, tagname):
292 def tagtype(self, tagname):
293 '''
293 '''
294 return the type of the given tag. result can be:
294 return the type of the given tag. result can be:
295
295
296 'local' : a local tag
296 'local' : a local tag
297 'global' : a global tag
297 'global' : a global tag
298 None : tag does not exist
298 None : tag does not exist
299 '''
299 '''
300
300
301 self.tags()
301 self.tags()
302
302
303 return self._tagtypes.get(tagname)
303 return self._tagtypes.get(tagname)
304
304
305 def tagslist(self):
305 def tagslist(self):
306 '''return a list of tags ordered by revision'''
306 '''return a list of tags ordered by revision'''
307 l = []
307 l = []
308 for t, n in self.tags().iteritems():
308 for t, n in self.tags().iteritems():
309 try:
309 try:
310 r = self.changelog.rev(n)
310 r = self.changelog.rev(n)
311 except:
311 except:
312 r = -2 # sort to the beginning of the list if unknown
312 r = -2 # sort to the beginning of the list if unknown
313 l.append((r, t, n))
313 l.append((r, t, n))
314 return [(t, n) for r, t, n in sorted(l)]
314 return [(t, n) for r, t, n in sorted(l)]
315
315
316 def nodetags(self, node):
316 def nodetags(self, node):
317 '''return the tags associated with a node'''
317 '''return the tags associated with a node'''
318 if not self.nodetagscache:
318 if not self.nodetagscache:
319 self.nodetagscache = {}
319 self.nodetagscache = {}
320 for t, n in self.tags().iteritems():
320 for t, n in self.tags().iteritems():
321 self.nodetagscache.setdefault(n, []).append(t)
321 self.nodetagscache.setdefault(n, []).append(t)
322 for tags in self.nodetagscache.itervalues():
322 for tags in self.nodetagscache.itervalues():
323 tags.sort()
323 tags.sort()
324 return self.nodetagscache.get(node, [])
324 return self.nodetagscache.get(node, [])
325
325
326 def _branchtags(self, partial, lrev):
326 def _branchtags(self, partial, lrev):
327 # TODO: rename this function?
327 # TODO: rename this function?
328 tiprev = len(self) - 1
328 tiprev = len(self) - 1
329 if lrev != tiprev:
329 if lrev != tiprev:
330 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
330 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
331 self._updatebranchcache(partial, ctxgen)
331 self._updatebranchcache(partial, ctxgen)
332 self._writebranchcache(partial, self.changelog.tip(), tiprev)
332 self._writebranchcache(partial, self.changelog.tip(), tiprev)
333
333
334 return partial
334 return partial
335
335
336 def branchmap(self):
336 def branchmap(self):
337 '''returns a dictionary {branch: [branchheads]}'''
337 '''returns a dictionary {branch: [branchheads]}'''
338 tip = self.changelog.tip()
338 tip = self.changelog.tip()
339 if self._branchcache is not None and self._branchcachetip == tip:
339 if self._branchcache is not None and self._branchcachetip == tip:
340 return self._branchcache
340 return self._branchcache
341
341
342 oldtip = self._branchcachetip
342 oldtip = self._branchcachetip
343 self._branchcachetip = tip
343 self._branchcachetip = tip
344 if oldtip is None or oldtip not in self.changelog.nodemap:
344 if oldtip is None or oldtip not in self.changelog.nodemap:
345 partial, last, lrev = self._readbranchcache()
345 partial, last, lrev = self._readbranchcache()
346 else:
346 else:
347 lrev = self.changelog.rev(oldtip)
347 lrev = self.changelog.rev(oldtip)
348 partial = self._branchcache
348 partial = self._branchcache
349
349
350 self._branchtags(partial, lrev)
350 self._branchtags(partial, lrev)
351 # this private cache holds all heads (not just tips)
351 # this private cache holds all heads (not just tips)
352 self._branchcache = partial
352 self._branchcache = partial
353
353
354 return self._branchcache
354 return self._branchcache
355
355
356 def branchtags(self):
356 def branchtags(self):
357 '''return a dict where branch names map to the tipmost head of
357 '''return a dict where branch names map to the tipmost head of
358 the branch, open heads come before closed'''
358 the branch, open heads come before closed'''
359 bt = {}
359 bt = {}
360 for bn, heads in self.branchmap().iteritems():
360 for bn, heads in self.branchmap().iteritems():
361 tip = heads[-1]
361 tip = heads[-1]
362 for h in reversed(heads):
362 for h in reversed(heads):
363 if 'close' not in self.changelog.read(h)[5]:
363 if 'close' not in self.changelog.read(h)[5]:
364 tip = h
364 tip = h
365 break
365 break
366 bt[bn] = tip
366 bt[bn] = tip
367 return bt
367 return bt
368
368
369
369
370 def _readbranchcache(self):
370 def _readbranchcache(self):
371 partial = {}
371 partial = {}
372 try:
372 try:
373 f = self.opener("branchheads.cache")
373 f = self.opener("branchheads.cache")
374 lines = f.read().split('\n')
374 lines = f.read().split('\n')
375 f.close()
375 f.close()
376 except (IOError, OSError):
376 except (IOError, OSError):
377 return {}, nullid, nullrev
377 return {}, nullid, nullrev
378
378
379 try:
379 try:
380 last, lrev = lines.pop(0).split(" ", 1)
380 last, lrev = lines.pop(0).split(" ", 1)
381 last, lrev = bin(last), int(lrev)
381 last, lrev = bin(last), int(lrev)
382 if lrev >= len(self) or self[lrev].node() != last:
382 if lrev >= len(self) or self[lrev].node() != last:
383 # invalidate the cache
383 # invalidate the cache
384 raise ValueError('invalidating branch cache (tip differs)')
384 raise ValueError('invalidating branch cache (tip differs)')
385 for l in lines:
385 for l in lines:
386 if not l:
386 if not l:
387 continue
387 continue
388 node, label = l.split(" ", 1)
388 node, label = l.split(" ", 1)
389 partial.setdefault(label.strip(), []).append(bin(node))
389 partial.setdefault(label.strip(), []).append(bin(node))
390 except KeyboardInterrupt:
390 except KeyboardInterrupt:
391 raise
391 raise
392 except Exception, inst:
392 except Exception, inst:
393 if self.ui.debugflag:
393 if self.ui.debugflag:
394 self.ui.warn(str(inst), '\n')
394 self.ui.warn(str(inst), '\n')
395 partial, last, lrev = {}, nullid, nullrev
395 partial, last, lrev = {}, nullid, nullrev
396 return partial, last, lrev
396 return partial, last, lrev
397
397
398 def _writebranchcache(self, branches, tip, tiprev):
398 def _writebranchcache(self, branches, tip, tiprev):
399 try:
399 try:
400 f = self.opener("branchheads.cache", "w", atomictemp=True)
400 f = self.opener("branchheads.cache", "w", atomictemp=True)
401 f.write("%s %s\n" % (hex(tip), tiprev))
401 f.write("%s %s\n" % (hex(tip), tiprev))
402 for label, nodes in branches.iteritems():
402 for label, nodes in branches.iteritems():
403 for node in nodes:
403 for node in nodes:
404 f.write("%s %s\n" % (hex(node), label))
404 f.write("%s %s\n" % (hex(node), label))
405 f.rename()
405 f.rename()
406 except (IOError, OSError):
406 except (IOError, OSError):
407 pass
407 pass
408
408
409 def _updatebranchcache(self, partial, ctxgen):
409 def _updatebranchcache(self, partial, ctxgen):
410 # collect new branch entries
410 # collect new branch entries
411 newbranches = {}
411 newbranches = {}
412 for c in ctxgen:
412 for c in ctxgen:
413 newbranches.setdefault(c.branch(), []).append(c.node())
413 newbranches.setdefault(c.branch(), []).append(c.node())
414 # if older branchheads are reachable from new ones, they aren't
414 # if older branchheads are reachable from new ones, they aren't
415 # really branchheads. Note checking parents is insufficient:
415 # really branchheads. Note checking parents is insufficient:
416 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
416 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
417 for branch, newnodes in newbranches.iteritems():
417 for branch, newnodes in newbranches.iteritems():
418 bheads = partial.setdefault(branch, [])
418 bheads = partial.setdefault(branch, [])
419 bheads.extend(newnodes)
419 bheads.extend(newnodes)
420 if len(bheads) <= 1:
420 if len(bheads) <= 1:
421 continue
421 continue
422 # starting from tip means fewer passes over reachable
422 # starting from tip means fewer passes over reachable
423 while newnodes:
423 while newnodes:
424 latest = newnodes.pop()
424 latest = newnodes.pop()
425 if latest not in bheads:
425 if latest not in bheads:
426 continue
426 continue
427 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
427 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
428 reachable = self.changelog.reachable(latest, minbhrev)
428 reachable = self.changelog.reachable(latest, minbhrev)
429 reachable.remove(latest)
429 reachable.remove(latest)
430 bheads = [b for b in bheads if b not in reachable]
430 bheads = [b for b in bheads if b not in reachable]
431 partial[branch] = bheads
431 partial[branch] = bheads
432
432
433 def lookup(self, key):
433 def lookup(self, key):
434 if isinstance(key, int):
434 if isinstance(key, int):
435 return self.changelog.node(key)
435 return self.changelog.node(key)
436 elif key == '.':
436 elif key == '.':
437 return self.dirstate.parents()[0]
437 return self.dirstate.parents()[0]
438 elif key == 'null':
438 elif key == 'null':
439 return nullid
439 return nullid
440 elif key == 'tip':
440 elif key == 'tip':
441 return self.changelog.tip()
441 return self.changelog.tip()
442 n = self.changelog._match(key)
442 n = self.changelog._match(key)
443 if n:
443 if n:
444 return n
444 return n
445 if key in self.tags():
445 if key in self.tags():
446 return self.tags()[key]
446 return self.tags()[key]
447 if key in self.branchtags():
447 if key in self.branchtags():
448 return self.branchtags()[key]
448 return self.branchtags()[key]
449 n = self.changelog._partialmatch(key)
449 n = self.changelog._partialmatch(key)
450 if n:
450 if n:
451 return n
451 return n
452
452
453 # can't find key, check if it might have come from damaged dirstate
453 # can't find key, check if it might have come from damaged dirstate
454 if key in self.dirstate.parents():
454 if key in self.dirstate.parents():
455 raise error.Abort(_("working directory has unknown parent '%s'!")
455 raise error.Abort(_("working directory has unknown parent '%s'!")
456 % short(key))
456 % short(key))
457 try:
457 try:
458 if len(key) == 20:
458 if len(key) == 20:
459 key = hex(key)
459 key = hex(key)
460 except:
460 except:
461 pass
461 pass
462 raise error.RepoLookupError(_("unknown revision '%s'") % key)
462 raise error.RepoLookupError(_("unknown revision '%s'") % key)
463
463
464 def lookupbranch(self, key, remote=None):
464 def lookupbranch(self, key, remote=None):
465 repo = remote or self
465 repo = remote or self
466 if key in repo.branchmap():
466 if key in repo.branchmap():
467 return key
467 return key
468
468
469 repo = (remote and remote.local()) and remote or self
469 repo = (remote and remote.local()) and remote or self
470 return repo[key].branch()
470 return repo[key].branch()
471
471
472 def local(self):
472 def local(self):
473 return True
473 return True
474
474
475 def join(self, f):
475 def join(self, f):
476 return os.path.join(self.path, f)
476 return os.path.join(self.path, f)
477
477
478 def wjoin(self, f):
478 def wjoin(self, f):
479 return os.path.join(self.root, f)
479 return os.path.join(self.root, f)
480
480
481 def rjoin(self, f):
481 def rjoin(self, f):
482 return os.path.join(self.root, util.pconvert(f))
482 return os.path.join(self.root, util.pconvert(f))
483
483
484 def file(self, f):
484 def file(self, f):
485 if f[0] == '/':
485 if f[0] == '/':
486 f = f[1:]
486 f = f[1:]
487 return filelog.filelog(self.sopener, f)
487 return filelog.filelog(self.sopener, f)
488
488
489 def changectx(self, changeid):
489 def changectx(self, changeid):
490 return self[changeid]
490 return self[changeid]
491
491
492 def parents(self, changeid=None):
492 def parents(self, changeid=None):
493 '''get list of changectxs for parents of changeid'''
493 '''get list of changectxs for parents of changeid'''
494 return self[changeid].parents()
494 return self[changeid].parents()
495
495
496 def filectx(self, path, changeid=None, fileid=None):
496 def filectx(self, path, changeid=None, fileid=None):
497 """changeid can be a changeset revision, node, or tag.
497 """changeid can be a changeset revision, node, or tag.
498 fileid can be a file revision or node."""
498 fileid can be a file revision or node."""
499 return context.filectx(self, path, changeid, fileid)
499 return context.filectx(self, path, changeid, fileid)
500
500
501 def getcwd(self):
501 def getcwd(self):
502 return self.dirstate.getcwd()
502 return self.dirstate.getcwd()
503
503
504 def pathto(self, f, cwd=None):
504 def pathto(self, f, cwd=None):
505 return self.dirstate.pathto(f, cwd)
505 return self.dirstate.pathto(f, cwd)
506
506
507 def wfile(self, f, mode='r'):
507 def wfile(self, f, mode='r'):
508 return self.wopener(f, mode)
508 return self.wopener(f, mode)
509
509
510 def _link(self, f):
510 def _link(self, f):
511 return os.path.islink(self.wjoin(f))
511 return os.path.islink(self.wjoin(f))
512
512
513 def _filter(self, filter, filename, data):
513 def _loadfilter(self, filter):
514 if filter not in self.filterpats:
514 if filter not in self.filterpats:
515 l = []
515 l = []
516 for pat, cmd in self.ui.configitems(filter):
516 for pat, cmd in self.ui.configitems(filter):
517 if cmd == '!':
517 if cmd == '!':
518 continue
518 continue
519 mf = matchmod.match(self.root, '', [pat])
519 mf = matchmod.match(self.root, '', [pat])
520 fn = None
520 fn = None
521 params = cmd
521 params = cmd
522 for name, filterfn in self._datafilters.iteritems():
522 for name, filterfn in self._datafilters.iteritems():
523 if cmd.startswith(name):
523 if cmd.startswith(name):
524 fn = filterfn
524 fn = filterfn
525 params = cmd[len(name):].lstrip()
525 params = cmd[len(name):].lstrip()
526 break
526 break
527 if not fn:
527 if not fn:
528 fn = lambda s, c, **kwargs: util.filter(s, c)
528 fn = lambda s, c, **kwargs: util.filter(s, c)
529 # Wrap old filters not supporting keyword arguments
529 # Wrap old filters not supporting keyword arguments
530 if not inspect.getargspec(fn)[2]:
530 if not inspect.getargspec(fn)[2]:
531 oldfn = fn
531 oldfn = fn
532 fn = lambda s, c, **kwargs: oldfn(s, c)
532 fn = lambda s, c, **kwargs: oldfn(s, c)
533 l.append((mf, fn, params))
533 l.append((mf, fn, params))
534 self.filterpats[filter] = l
534 self.filterpats[filter] = l
535
535
536 def _filter(self, filter, filename, data):
537 self._loadfilter(filter)
538
536 for mf, fn, cmd in self.filterpats[filter]:
539 for mf, fn, cmd in self.filterpats[filter]:
537 if mf(filename):
540 if mf(filename):
538 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
541 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
539 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
542 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
540 break
543 break
541
544
542 return data
545 return data
543
546
544 def adddatafilter(self, name, filter):
547 def adddatafilter(self, name, filter):
545 self._datafilters[name] = filter
548 self._datafilters[name] = filter
546
549
547 def wread(self, filename):
550 def wread(self, filename):
548 if self._link(filename):
551 if self._link(filename):
549 data = os.readlink(self.wjoin(filename))
552 data = os.readlink(self.wjoin(filename))
550 else:
553 else:
551 data = self.wopener(filename, 'r').read()
554 data = self.wopener(filename, 'r').read()
552 return self._filter("encode", filename, data)
555 return self._filter("encode", filename, data)
553
556
554 def wwrite(self, filename, data, flags):
557 def wwrite(self, filename, data, flags):
555 data = self._filter("decode", filename, data)
558 data = self._filter("decode", filename, data)
556 try:
559 try:
557 os.unlink(self.wjoin(filename))
560 os.unlink(self.wjoin(filename))
558 except OSError:
561 except OSError:
559 pass
562 pass
560 if 'l' in flags:
563 if 'l' in flags:
561 self.wopener.symlink(data, filename)
564 self.wopener.symlink(data, filename)
562 else:
565 else:
563 self.wopener(filename, 'w').write(data)
566 self.wopener(filename, 'w').write(data)
564 if 'x' in flags:
567 if 'x' in flags:
565 util.set_flags(self.wjoin(filename), False, True)
568 util.set_flags(self.wjoin(filename), False, True)
566
569
567 def wwritedata(self, filename, data):
570 def wwritedata(self, filename, data):
568 return self._filter("decode", filename, data)
571 return self._filter("decode", filename, data)
569
572
570 def transaction(self, desc):
573 def transaction(self, desc):
571 tr = self._transref and self._transref() or None
574 tr = self._transref and self._transref() or None
572 if tr and tr.running():
575 if tr and tr.running():
573 return tr.nest()
576 return tr.nest()
574
577
575 # abort here if the journal already exists
578 # abort here if the journal already exists
576 if os.path.exists(self.sjoin("journal")):
579 if os.path.exists(self.sjoin("journal")):
577 raise error.RepoError(
580 raise error.RepoError(
578 _("abandoned transaction found - run hg recover"))
581 _("abandoned transaction found - run hg recover"))
579
582
580 # save dirstate for rollback
583 # save dirstate for rollback
581 try:
584 try:
582 ds = self.opener("dirstate").read()
585 ds = self.opener("dirstate").read()
583 except IOError:
586 except IOError:
584 ds = ""
587 ds = ""
585 self.opener("journal.dirstate", "w").write(ds)
588 self.opener("journal.dirstate", "w").write(ds)
586 self.opener("journal.branch", "w").write(self.dirstate.branch())
589 self.opener("journal.branch", "w").write(self.dirstate.branch())
587 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
590 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
588
591
589 renames = [(self.sjoin("journal"), self.sjoin("undo")),
592 renames = [(self.sjoin("journal"), self.sjoin("undo")),
590 (self.join("journal.dirstate"), self.join("undo.dirstate")),
593 (self.join("journal.dirstate"), self.join("undo.dirstate")),
591 (self.join("journal.branch"), self.join("undo.branch")),
594 (self.join("journal.branch"), self.join("undo.branch")),
592 (self.join("journal.desc"), self.join("undo.desc"))]
595 (self.join("journal.desc"), self.join("undo.desc"))]
593 tr = transaction.transaction(self.ui.warn, self.sopener,
596 tr = transaction.transaction(self.ui.warn, self.sopener,
594 self.sjoin("journal"),
597 self.sjoin("journal"),
595 aftertrans(renames),
598 aftertrans(renames),
596 self.store.createmode)
599 self.store.createmode)
597 self._transref = weakref.ref(tr)
600 self._transref = weakref.ref(tr)
598 return tr
601 return tr
599
602
600 def recover(self):
603 def recover(self):
601 lock = self.lock()
604 lock = self.lock()
602 try:
605 try:
603 if os.path.exists(self.sjoin("journal")):
606 if os.path.exists(self.sjoin("journal")):
604 self.ui.status(_("rolling back interrupted transaction\n"))
607 self.ui.status(_("rolling back interrupted transaction\n"))
605 transaction.rollback(self.sopener, self.sjoin("journal"),
608 transaction.rollback(self.sopener, self.sjoin("journal"),
606 self.ui.warn)
609 self.ui.warn)
607 self.invalidate()
610 self.invalidate()
608 return True
611 return True
609 else:
612 else:
610 self.ui.warn(_("no interrupted transaction available\n"))
613 self.ui.warn(_("no interrupted transaction available\n"))
611 return False
614 return False
612 finally:
615 finally:
613 lock.release()
616 lock.release()
614
617
615 def rollback(self, dryrun=False):
618 def rollback(self, dryrun=False):
616 wlock = lock = None
619 wlock = lock = None
617 try:
620 try:
618 wlock = self.wlock()
621 wlock = self.wlock()
619 lock = self.lock()
622 lock = self.lock()
620 if os.path.exists(self.sjoin("undo")):
623 if os.path.exists(self.sjoin("undo")):
621 try:
624 try:
622 args = self.opener("undo.desc", "r").read().splitlines()
625 args = self.opener("undo.desc", "r").read().splitlines()
623 if len(args) >= 3 and self.ui.verbose:
626 if len(args) >= 3 and self.ui.verbose:
624 desc = _("rolling back to revision %s"
627 desc = _("rolling back to revision %s"
625 " (undo %s: %s)\n") % (
628 " (undo %s: %s)\n") % (
626 int(args[0]) - 1, args[1], args[2])
629 int(args[0]) - 1, args[1], args[2])
627 elif len(args) >= 2:
630 elif len(args) >= 2:
628 desc = _("rolling back to revision %s (undo %s)\n") % (
631 desc = _("rolling back to revision %s (undo %s)\n") % (
629 int(args[0]) - 1, args[1])
632 int(args[0]) - 1, args[1])
630 except IOError:
633 except IOError:
631 desc = _("rolling back unknown transaction\n")
634 desc = _("rolling back unknown transaction\n")
632 self.ui.status(desc)
635 self.ui.status(desc)
633 if dryrun:
636 if dryrun:
634 return
637 return
635 transaction.rollback(self.sopener, self.sjoin("undo"),
638 transaction.rollback(self.sopener, self.sjoin("undo"),
636 self.ui.warn)
639 self.ui.warn)
637 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
640 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
638 try:
641 try:
639 branch = self.opener("undo.branch").read()
642 branch = self.opener("undo.branch").read()
640 self.dirstate.setbranch(branch)
643 self.dirstate.setbranch(branch)
641 except IOError:
644 except IOError:
642 self.ui.warn(_("Named branch could not be reset, "
645 self.ui.warn(_("Named branch could not be reset, "
643 "current branch still is: %s\n")
646 "current branch still is: %s\n")
644 % encoding.tolocal(self.dirstate.branch()))
647 % encoding.tolocal(self.dirstate.branch()))
645 self.invalidate()
648 self.invalidate()
646 self.dirstate.invalidate()
649 self.dirstate.invalidate()
647 self.destroyed()
650 self.destroyed()
648 else:
651 else:
649 self.ui.warn(_("no rollback information available\n"))
652 self.ui.warn(_("no rollback information available\n"))
650 return 1
653 return 1
651 finally:
654 finally:
652 release(lock, wlock)
655 release(lock, wlock)
653
656
654 def invalidatecaches(self):
657 def invalidatecaches(self):
655 self._tags = None
658 self._tags = None
656 self._tagtypes = None
659 self._tagtypes = None
657 self.nodetagscache = None
660 self.nodetagscache = None
658 self._branchcache = None # in UTF-8
661 self._branchcache = None # in UTF-8
659 self._branchcachetip = None
662 self._branchcachetip = None
660
663
661 def invalidate(self):
664 def invalidate(self):
662 for a in "changelog manifest".split():
665 for a in "changelog manifest".split():
663 if a in self.__dict__:
666 if a in self.__dict__:
664 delattr(self, a)
667 delattr(self, a)
665 self.invalidatecaches()
668 self.invalidatecaches()
666
669
667 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
670 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
668 try:
671 try:
669 l = lock.lock(lockname, 0, releasefn, desc=desc)
672 l = lock.lock(lockname, 0, releasefn, desc=desc)
670 except error.LockHeld, inst:
673 except error.LockHeld, inst:
671 if not wait:
674 if not wait:
672 raise
675 raise
673 self.ui.warn(_("waiting for lock on %s held by %r\n") %
676 self.ui.warn(_("waiting for lock on %s held by %r\n") %
674 (desc, inst.locker))
677 (desc, inst.locker))
675 # default to 600 seconds timeout
678 # default to 600 seconds timeout
676 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
679 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
677 releasefn, desc=desc)
680 releasefn, desc=desc)
678 if acquirefn:
681 if acquirefn:
679 acquirefn()
682 acquirefn()
680 return l
683 return l
681
684
682 def lock(self, wait=True):
685 def lock(self, wait=True):
683 '''Lock the repository store (.hg/store) and return a weak reference
686 '''Lock the repository store (.hg/store) and return a weak reference
684 to the lock. Use this before modifying the store (e.g. committing or
687 to the lock. Use this before modifying the store (e.g. committing or
685 stripping). If you are opening a transaction, get a lock as well.)'''
688 stripping). If you are opening a transaction, get a lock as well.)'''
686 l = self._lockref and self._lockref()
689 l = self._lockref and self._lockref()
687 if l is not None and l.held:
690 if l is not None and l.held:
688 l.lock()
691 l.lock()
689 return l
692 return l
690
693
691 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
694 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
692 _('repository %s') % self.origroot)
695 _('repository %s') % self.origroot)
693 self._lockref = weakref.ref(l)
696 self._lockref = weakref.ref(l)
694 return l
697 return l
695
698
696 def wlock(self, wait=True):
699 def wlock(self, wait=True):
697 '''Lock the non-store parts of the repository (everything under
700 '''Lock the non-store parts of the repository (everything under
698 .hg except .hg/store) and return a weak reference to the lock.
701 .hg except .hg/store) and return a weak reference to the lock.
699 Use this before modifying files in .hg.'''
702 Use this before modifying files in .hg.'''
700 l = self._wlockref and self._wlockref()
703 l = self._wlockref and self._wlockref()
701 if l is not None and l.held:
704 if l is not None and l.held:
702 l.lock()
705 l.lock()
703 return l
706 return l
704
707
705 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
708 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
706 self.dirstate.invalidate, _('working directory of %s') %
709 self.dirstate.invalidate, _('working directory of %s') %
707 self.origroot)
710 self.origroot)
708 self._wlockref = weakref.ref(l)
711 self._wlockref = weakref.ref(l)
709 return l
712 return l
710
713
711 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
714 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
712 """
715 """
713 commit an individual file as part of a larger transaction
716 commit an individual file as part of a larger transaction
714 """
717 """
715
718
716 fname = fctx.path()
719 fname = fctx.path()
717 text = fctx.data()
720 text = fctx.data()
718 flog = self.file(fname)
721 flog = self.file(fname)
719 fparent1 = manifest1.get(fname, nullid)
722 fparent1 = manifest1.get(fname, nullid)
720 fparent2 = fparent2o = manifest2.get(fname, nullid)
723 fparent2 = fparent2o = manifest2.get(fname, nullid)
721
724
722 meta = {}
725 meta = {}
723 copy = fctx.renamed()
726 copy = fctx.renamed()
724 if copy and copy[0] != fname:
727 if copy and copy[0] != fname:
725 # Mark the new revision of this file as a copy of another
728 # Mark the new revision of this file as a copy of another
726 # file. This copy data will effectively act as a parent
729 # file. This copy data will effectively act as a parent
727 # of this new revision. If this is a merge, the first
730 # of this new revision. If this is a merge, the first
728 # parent will be the nullid (meaning "look up the copy data")
731 # parent will be the nullid (meaning "look up the copy data")
729 # and the second one will be the other parent. For example:
732 # and the second one will be the other parent. For example:
730 #
733 #
731 # 0 --- 1 --- 3 rev1 changes file foo
734 # 0 --- 1 --- 3 rev1 changes file foo
732 # \ / rev2 renames foo to bar and changes it
735 # \ / rev2 renames foo to bar and changes it
733 # \- 2 -/ rev3 should have bar with all changes and
736 # \- 2 -/ rev3 should have bar with all changes and
734 # should record that bar descends from
737 # should record that bar descends from
735 # bar in rev2 and foo in rev1
738 # bar in rev2 and foo in rev1
736 #
739 #
737 # this allows this merge to succeed:
740 # this allows this merge to succeed:
738 #
741 #
739 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
742 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
740 # \ / merging rev3 and rev4 should use bar@rev2
743 # \ / merging rev3 and rev4 should use bar@rev2
741 # \- 2 --- 4 as the merge base
744 # \- 2 --- 4 as the merge base
742 #
745 #
743
746
744 cfname = copy[0]
747 cfname = copy[0]
745 crev = manifest1.get(cfname)
748 crev = manifest1.get(cfname)
746 newfparent = fparent2
749 newfparent = fparent2
747
750
748 if manifest2: # branch merge
751 if manifest2: # branch merge
749 if fparent2 == nullid or crev is None: # copied on remote side
752 if fparent2 == nullid or crev is None: # copied on remote side
750 if cfname in manifest2:
753 if cfname in manifest2:
751 crev = manifest2[cfname]
754 crev = manifest2[cfname]
752 newfparent = fparent1
755 newfparent = fparent1
753
756
754 # find source in nearest ancestor if we've lost track
757 # find source in nearest ancestor if we've lost track
755 if not crev:
758 if not crev:
756 self.ui.debug(" %s: searching for copy revision for %s\n" %
759 self.ui.debug(" %s: searching for copy revision for %s\n" %
757 (fname, cfname))
760 (fname, cfname))
758 for ancestor in self['.'].ancestors():
761 for ancestor in self['.'].ancestors():
759 if cfname in ancestor:
762 if cfname in ancestor:
760 crev = ancestor[cfname].filenode()
763 crev = ancestor[cfname].filenode()
761 break
764 break
762
765
763 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
766 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
764 meta["copy"] = cfname
767 meta["copy"] = cfname
765 meta["copyrev"] = hex(crev)
768 meta["copyrev"] = hex(crev)
766 fparent1, fparent2 = nullid, newfparent
769 fparent1, fparent2 = nullid, newfparent
767 elif fparent2 != nullid:
770 elif fparent2 != nullid:
768 # is one parent an ancestor of the other?
771 # is one parent an ancestor of the other?
769 fparentancestor = flog.ancestor(fparent1, fparent2)
772 fparentancestor = flog.ancestor(fparent1, fparent2)
770 if fparentancestor == fparent1:
773 if fparentancestor == fparent1:
771 fparent1, fparent2 = fparent2, nullid
774 fparent1, fparent2 = fparent2, nullid
772 elif fparentancestor == fparent2:
775 elif fparentancestor == fparent2:
773 fparent2 = nullid
776 fparent2 = nullid
774
777
775 # is the file changed?
778 # is the file changed?
776 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
779 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
777 changelist.append(fname)
780 changelist.append(fname)
778 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
781 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
779
782
780 # are just the flags changed during merge?
783 # are just the flags changed during merge?
781 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
784 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
782 changelist.append(fname)
785 changelist.append(fname)
783
786
784 return fparent1
787 return fparent1
785
788
786 def commit(self, text="", user=None, date=None, match=None, force=False,
789 def commit(self, text="", user=None, date=None, match=None, force=False,
787 editor=False, extra={}):
790 editor=False, extra={}):
788 """Add a new revision to current repository.
791 """Add a new revision to current repository.
789
792
790 Revision information is gathered from the working directory,
793 Revision information is gathered from the working directory,
791 match can be used to filter the committed files. If editor is
794 match can be used to filter the committed files. If editor is
792 supplied, it is called to get a commit message.
795 supplied, it is called to get a commit message.
793 """
796 """
794
797
795 def fail(f, msg):
798 def fail(f, msg):
796 raise util.Abort('%s: %s' % (f, msg))
799 raise util.Abort('%s: %s' % (f, msg))
797
800
798 if not match:
801 if not match:
799 match = matchmod.always(self.root, '')
802 match = matchmod.always(self.root, '')
800
803
801 if not force:
804 if not force:
802 vdirs = []
805 vdirs = []
803 match.dir = vdirs.append
806 match.dir = vdirs.append
804 match.bad = fail
807 match.bad = fail
805
808
806 wlock = self.wlock()
809 wlock = self.wlock()
807 try:
810 try:
808 wctx = self[None]
811 wctx = self[None]
809 merge = len(wctx.parents()) > 1
812 merge = len(wctx.parents()) > 1
810
813
811 if (not force and merge and match and
814 if (not force and merge and match and
812 (match.files() or match.anypats())):
815 (match.files() or match.anypats())):
813 raise util.Abort(_('cannot partially commit a merge '
816 raise util.Abort(_('cannot partially commit a merge '
814 '(do not specify files or patterns)'))
817 '(do not specify files or patterns)'))
815
818
816 changes = self.status(match=match, clean=force)
819 changes = self.status(match=match, clean=force)
817 if force:
820 if force:
818 changes[0].extend(changes[6]) # mq may commit unchanged files
821 changes[0].extend(changes[6]) # mq may commit unchanged files
819
822
820 # check subrepos
823 # check subrepos
821 subs = []
824 subs = []
822 removedsubs = set()
825 removedsubs = set()
823 for p in wctx.parents():
826 for p in wctx.parents():
824 removedsubs.update(s for s in p.substate if match(s))
827 removedsubs.update(s for s in p.substate if match(s))
825 for s in wctx.substate:
828 for s in wctx.substate:
826 removedsubs.discard(s)
829 removedsubs.discard(s)
827 if match(s) and wctx.sub(s).dirty():
830 if match(s) and wctx.sub(s).dirty():
828 subs.append(s)
831 subs.append(s)
829 if (subs or removedsubs):
832 if (subs or removedsubs):
830 if (not match('.hgsub') and
833 if (not match('.hgsub') and
831 '.hgsub' in (wctx.modified() + wctx.added())):
834 '.hgsub' in (wctx.modified() + wctx.added())):
832 raise util.Abort(_("can't commit subrepos without .hgsub"))
835 raise util.Abort(_("can't commit subrepos without .hgsub"))
833 if '.hgsubstate' not in changes[0]:
836 if '.hgsubstate' not in changes[0]:
834 changes[0].insert(0, '.hgsubstate')
837 changes[0].insert(0, '.hgsubstate')
835
838
836 # make sure all explicit patterns are matched
839 # make sure all explicit patterns are matched
837 if not force and match.files():
840 if not force and match.files():
838 matched = set(changes[0] + changes[1] + changes[2])
841 matched = set(changes[0] + changes[1] + changes[2])
839
842
840 for f in match.files():
843 for f in match.files():
841 if f == '.' or f in matched or f in wctx.substate:
844 if f == '.' or f in matched or f in wctx.substate:
842 continue
845 continue
843 if f in changes[3]: # missing
846 if f in changes[3]: # missing
844 fail(f, _('file not found!'))
847 fail(f, _('file not found!'))
845 if f in vdirs: # visited directory
848 if f in vdirs: # visited directory
846 d = f + '/'
849 d = f + '/'
847 for mf in matched:
850 for mf in matched:
848 if mf.startswith(d):
851 if mf.startswith(d):
849 break
852 break
850 else:
853 else:
851 fail(f, _("no match under directory!"))
854 fail(f, _("no match under directory!"))
852 elif f not in self.dirstate:
855 elif f not in self.dirstate:
853 fail(f, _("file not tracked!"))
856 fail(f, _("file not tracked!"))
854
857
855 if (not force and not extra.get("close") and not merge
858 if (not force and not extra.get("close") and not merge
856 and not (changes[0] or changes[1] or changes[2])
859 and not (changes[0] or changes[1] or changes[2])
857 and wctx.branch() == wctx.p1().branch()):
860 and wctx.branch() == wctx.p1().branch()):
858 return None
861 return None
859
862
860 ms = mergemod.mergestate(self)
863 ms = mergemod.mergestate(self)
861 for f in changes[0]:
864 for f in changes[0]:
862 if f in ms and ms[f] == 'u':
865 if f in ms and ms[f] == 'u':
863 raise util.Abort(_("unresolved merge conflicts "
866 raise util.Abort(_("unresolved merge conflicts "
864 "(see hg resolve)"))
867 "(see hg resolve)"))
865
868
866 cctx = context.workingctx(self, text, user, date, extra, changes)
869 cctx = context.workingctx(self, text, user, date, extra, changes)
867 if editor:
870 if editor:
868 cctx._text = editor(self, cctx, subs)
871 cctx._text = editor(self, cctx, subs)
869 edited = (text != cctx._text)
872 edited = (text != cctx._text)
870
873
871 # commit subs
874 # commit subs
872 if subs or removedsubs:
875 if subs or removedsubs:
873 state = wctx.substate.copy()
876 state = wctx.substate.copy()
874 for s in subs:
877 for s in subs:
875 sub = wctx.sub(s)
878 sub = wctx.sub(s)
876 self.ui.status(_('committing subrepository %s\n') %
879 self.ui.status(_('committing subrepository %s\n') %
877 subrepo.relpath(sub))
880 subrepo.relpath(sub))
878 sr = sub.commit(cctx._text, user, date)
881 sr = sub.commit(cctx._text, user, date)
879 state[s] = (state[s][0], sr)
882 state[s] = (state[s][0], sr)
880 subrepo.writestate(self, state)
883 subrepo.writestate(self, state)
881
884
882 # Save commit message in case this transaction gets rolled back
885 # Save commit message in case this transaction gets rolled back
883 # (e.g. by a pretxncommit hook). Leave the content alone on
886 # (e.g. by a pretxncommit hook). Leave the content alone on
884 # the assumption that the user will use the same editor again.
887 # the assumption that the user will use the same editor again.
885 msgfile = self.opener('last-message.txt', 'wb')
888 msgfile = self.opener('last-message.txt', 'wb')
886 msgfile.write(cctx._text)
889 msgfile.write(cctx._text)
887 msgfile.close()
890 msgfile.close()
888
891
889 p1, p2 = self.dirstate.parents()
892 p1, p2 = self.dirstate.parents()
890 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
893 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
891 try:
894 try:
892 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
895 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
893 ret = self.commitctx(cctx, True)
896 ret = self.commitctx(cctx, True)
894 except:
897 except:
895 if edited:
898 if edited:
896 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
899 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
897 self.ui.write(
900 self.ui.write(
898 _('note: commit message saved in %s\n') % msgfn)
901 _('note: commit message saved in %s\n') % msgfn)
899 raise
902 raise
900
903
901 # update dirstate and mergestate
904 # update dirstate and mergestate
902 for f in changes[0] + changes[1]:
905 for f in changes[0] + changes[1]:
903 self.dirstate.normal(f)
906 self.dirstate.normal(f)
904 for f in changes[2]:
907 for f in changes[2]:
905 self.dirstate.forget(f)
908 self.dirstate.forget(f)
906 self.dirstate.setparents(ret)
909 self.dirstate.setparents(ret)
907 ms.reset()
910 ms.reset()
908 finally:
911 finally:
909 wlock.release()
912 wlock.release()
910
913
911 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
914 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
912 return ret
915 return ret
913
916
914 def commitctx(self, ctx, error=False):
917 def commitctx(self, ctx, error=False):
915 """Add a new revision to current repository.
918 """Add a new revision to current repository.
916 Revision information is passed via the context argument.
919 Revision information is passed via the context argument.
917 """
920 """
918
921
919 tr = lock = None
922 tr = lock = None
920 removed = ctx.removed()
923 removed = ctx.removed()
921 p1, p2 = ctx.p1(), ctx.p2()
924 p1, p2 = ctx.p1(), ctx.p2()
922 m1 = p1.manifest().copy()
925 m1 = p1.manifest().copy()
923 m2 = p2.manifest()
926 m2 = p2.manifest()
924 user = ctx.user()
927 user = ctx.user()
925
928
926 lock = self.lock()
929 lock = self.lock()
927 try:
930 try:
928 tr = self.transaction("commit")
931 tr = self.transaction("commit")
929 trp = weakref.proxy(tr)
932 trp = weakref.proxy(tr)
930
933
931 # check in files
934 # check in files
932 new = {}
935 new = {}
933 changed = []
936 changed = []
934 linkrev = len(self)
937 linkrev = len(self)
935 for f in sorted(ctx.modified() + ctx.added()):
938 for f in sorted(ctx.modified() + ctx.added()):
936 self.ui.note(f + "\n")
939 self.ui.note(f + "\n")
937 try:
940 try:
938 fctx = ctx[f]
941 fctx = ctx[f]
939 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
942 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
940 changed)
943 changed)
941 m1.set(f, fctx.flags())
944 m1.set(f, fctx.flags())
942 except OSError, inst:
945 except OSError, inst:
943 self.ui.warn(_("trouble committing %s!\n") % f)
946 self.ui.warn(_("trouble committing %s!\n") % f)
944 raise
947 raise
945 except IOError, inst:
948 except IOError, inst:
946 errcode = getattr(inst, 'errno', errno.ENOENT)
949 errcode = getattr(inst, 'errno', errno.ENOENT)
947 if error or errcode and errcode != errno.ENOENT:
950 if error or errcode and errcode != errno.ENOENT:
948 self.ui.warn(_("trouble committing %s!\n") % f)
951 self.ui.warn(_("trouble committing %s!\n") % f)
949 raise
952 raise
950 else:
953 else:
951 removed.append(f)
954 removed.append(f)
952
955
953 # update manifest
956 # update manifest
954 m1.update(new)
957 m1.update(new)
955 removed = [f for f in sorted(removed) if f in m1 or f in m2]
958 removed = [f for f in sorted(removed) if f in m1 or f in m2]
956 drop = [f for f in removed if f in m1]
959 drop = [f for f in removed if f in m1]
957 for f in drop:
960 for f in drop:
958 del m1[f]
961 del m1[f]
959 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
962 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
960 p2.manifestnode(), (new, drop))
963 p2.manifestnode(), (new, drop))
961
964
962 # update changelog
965 # update changelog
963 self.changelog.delayupdate()
966 self.changelog.delayupdate()
964 n = self.changelog.add(mn, changed + removed, ctx.description(),
967 n = self.changelog.add(mn, changed + removed, ctx.description(),
965 trp, p1.node(), p2.node(),
968 trp, p1.node(), p2.node(),
966 user, ctx.date(), ctx.extra().copy())
969 user, ctx.date(), ctx.extra().copy())
967 p = lambda: self.changelog.writepending() and self.root or ""
970 p = lambda: self.changelog.writepending() and self.root or ""
968 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
971 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
969 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
972 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
970 parent2=xp2, pending=p)
973 parent2=xp2, pending=p)
971 self.changelog.finalize(trp)
974 self.changelog.finalize(trp)
972 tr.close()
975 tr.close()
973
976
974 if self._branchcache:
977 if self._branchcache:
975 self.branchtags()
978 self.branchtags()
976 return n
979 return n
977 finally:
980 finally:
978 if tr:
981 if tr:
979 tr.release()
982 tr.release()
980 lock.release()
983 lock.release()
981
984
982 def destroyed(self):
985 def destroyed(self):
983 '''Inform the repository that nodes have been destroyed.
986 '''Inform the repository that nodes have been destroyed.
984 Intended for use by strip and rollback, so there's a common
987 Intended for use by strip and rollback, so there's a common
985 place for anything that has to be done after destroying history.'''
988 place for anything that has to be done after destroying history.'''
986 # XXX it might be nice if we could take the list of destroyed
989 # XXX it might be nice if we could take the list of destroyed
987 # nodes, but I don't see an easy way for rollback() to do that
990 # nodes, but I don't see an easy way for rollback() to do that
988
991
989 # Ensure the persistent tag cache is updated. Doing it now
992 # Ensure the persistent tag cache is updated. Doing it now
990 # means that the tag cache only has to worry about destroyed
993 # means that the tag cache only has to worry about destroyed
991 # heads immediately after a strip/rollback. That in turn
994 # heads immediately after a strip/rollback. That in turn
992 # guarantees that "cachetip == currenttip" (comparing both rev
995 # guarantees that "cachetip == currenttip" (comparing both rev
993 # and node) always means no nodes have been added or destroyed.
996 # and node) always means no nodes have been added or destroyed.
994
997
995 # XXX this is suboptimal when qrefresh'ing: we strip the current
998 # XXX this is suboptimal when qrefresh'ing: we strip the current
996 # head, refresh the tag cache, then immediately add a new head.
999 # head, refresh the tag cache, then immediately add a new head.
997 # But I think doing it this way is necessary for the "instant
1000 # But I think doing it this way is necessary for the "instant
998 # tag cache retrieval" case to work.
1001 # tag cache retrieval" case to work.
999 self.invalidatecaches()
1002 self.invalidatecaches()
1000
1003
1001 def walk(self, match, node=None):
1004 def walk(self, match, node=None):
1002 '''
1005 '''
1003 walk recursively through the directory tree or a given
1006 walk recursively through the directory tree or a given
1004 changeset, finding all files matched by the match
1007 changeset, finding all files matched by the match
1005 function
1008 function
1006 '''
1009 '''
1007 return self[node].walk(match)
1010 return self[node].walk(match)
1008
1011
1009 def status(self, node1='.', node2=None, match=None,
1012 def status(self, node1='.', node2=None, match=None,
1010 ignored=False, clean=False, unknown=False):
1013 ignored=False, clean=False, unknown=False):
1011 """return status of files between two nodes or node and working directory
1014 """return status of files between two nodes or node and working directory
1012
1015
1013 If node1 is None, use the first dirstate parent instead.
1016 If node1 is None, use the first dirstate parent instead.
1014 If node2 is None, compare node1 with working directory.
1017 If node2 is None, compare node1 with working directory.
1015 """
1018 """
1016
1019
1017 def mfmatches(ctx):
1020 def mfmatches(ctx):
1018 mf = ctx.manifest().copy()
1021 mf = ctx.manifest().copy()
1019 for fn in mf.keys():
1022 for fn in mf.keys():
1020 if not match(fn):
1023 if not match(fn):
1021 del mf[fn]
1024 del mf[fn]
1022 return mf
1025 return mf
1023
1026
1024 if isinstance(node1, context.changectx):
1027 if isinstance(node1, context.changectx):
1025 ctx1 = node1
1028 ctx1 = node1
1026 else:
1029 else:
1027 ctx1 = self[node1]
1030 ctx1 = self[node1]
1028 if isinstance(node2, context.changectx):
1031 if isinstance(node2, context.changectx):
1029 ctx2 = node2
1032 ctx2 = node2
1030 else:
1033 else:
1031 ctx2 = self[node2]
1034 ctx2 = self[node2]
1032
1035
1033 working = ctx2.rev() is None
1036 working = ctx2.rev() is None
1034 parentworking = working and ctx1 == self['.']
1037 parentworking = working and ctx1 == self['.']
1035 match = match or matchmod.always(self.root, self.getcwd())
1038 match = match or matchmod.always(self.root, self.getcwd())
1036 listignored, listclean, listunknown = ignored, clean, unknown
1039 listignored, listclean, listunknown = ignored, clean, unknown
1037
1040
1038 # load earliest manifest first for caching reasons
1041 # load earliest manifest first for caching reasons
1039 if not working and ctx2.rev() < ctx1.rev():
1042 if not working and ctx2.rev() < ctx1.rev():
1040 ctx2.manifest()
1043 ctx2.manifest()
1041
1044
1042 if not parentworking:
1045 if not parentworking:
1043 def bad(f, msg):
1046 def bad(f, msg):
1044 if f not in ctx1:
1047 if f not in ctx1:
1045 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1048 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1046 match.bad = bad
1049 match.bad = bad
1047
1050
1048 if working: # we need to scan the working dir
1051 if working: # we need to scan the working dir
1049 subrepos = []
1052 subrepos = []
1050 if '.hgsub' in self.dirstate:
1053 if '.hgsub' in self.dirstate:
1051 subrepos = ctx1.substate.keys()
1054 subrepos = ctx1.substate.keys()
1052 s = self.dirstate.status(match, subrepos, listignored,
1055 s = self.dirstate.status(match, subrepos, listignored,
1053 listclean, listunknown)
1056 listclean, listunknown)
1054 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1057 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1055
1058
1056 # check for any possibly clean files
1059 # check for any possibly clean files
1057 if parentworking and cmp:
1060 if parentworking and cmp:
1058 fixup = []
1061 fixup = []
1059 # do a full compare of any files that might have changed
1062 # do a full compare of any files that might have changed
1060 for f in sorted(cmp):
1063 for f in sorted(cmp):
1061 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1064 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1062 or ctx1[f].cmp(ctx2[f].data())):
1065 or ctx1[f].cmp(ctx2[f])):
1063 modified.append(f)
1066 modified.append(f)
1064 else:
1067 else:
1065 fixup.append(f)
1068 fixup.append(f)
1066
1069
1067 # update dirstate for files that are actually clean
1070 # update dirstate for files that are actually clean
1068 if fixup:
1071 if fixup:
1069 if listclean:
1072 if listclean:
1070 clean += fixup
1073 clean += fixup
1071
1074
1072 try:
1075 try:
1073 # updating the dirstate is optional
1076 # updating the dirstate is optional
1074 # so we don't wait on the lock
1077 # so we don't wait on the lock
1075 wlock = self.wlock(False)
1078 wlock = self.wlock(False)
1076 try:
1079 try:
1077 for f in fixup:
1080 for f in fixup:
1078 self.dirstate.normal(f)
1081 self.dirstate.normal(f)
1079 finally:
1082 finally:
1080 wlock.release()
1083 wlock.release()
1081 except error.LockError:
1084 except error.LockError:
1082 pass
1085 pass
1083
1086
1084 if not parentworking:
1087 if not parentworking:
1085 mf1 = mfmatches(ctx1)
1088 mf1 = mfmatches(ctx1)
1086 if working:
1089 if working:
1087 # we are comparing working dir against non-parent
1090 # we are comparing working dir against non-parent
1088 # generate a pseudo-manifest for the working dir
1091 # generate a pseudo-manifest for the working dir
1089 mf2 = mfmatches(self['.'])
1092 mf2 = mfmatches(self['.'])
1090 for f in cmp + modified + added:
1093 for f in cmp + modified + added:
1091 mf2[f] = None
1094 mf2[f] = None
1092 mf2.set(f, ctx2.flags(f))
1095 mf2.set(f, ctx2.flags(f))
1093 for f in removed:
1096 for f in removed:
1094 if f in mf2:
1097 if f in mf2:
1095 del mf2[f]
1098 del mf2[f]
1096 else:
1099 else:
1097 # we are comparing two revisions
1100 # we are comparing two revisions
1098 deleted, unknown, ignored = [], [], []
1101 deleted, unknown, ignored = [], [], []
1099 mf2 = mfmatches(ctx2)
1102 mf2 = mfmatches(ctx2)
1100
1103
1101 modified, added, clean = [], [], []
1104 modified, added, clean = [], [], []
1102 for fn in mf2:
1105 for fn in mf2:
1103 if fn in mf1:
1106 if fn in mf1:
1104 if (mf1.flags(fn) != mf2.flags(fn) or
1107 if (mf1.flags(fn) != mf2.flags(fn) or
1105 (mf1[fn] != mf2[fn] and
1108 (mf1[fn] != mf2[fn] and
1106 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1109 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1107 modified.append(fn)
1110 modified.append(fn)
1108 elif listclean:
1111 elif listclean:
1109 clean.append(fn)
1112 clean.append(fn)
1110 del mf1[fn]
1113 del mf1[fn]
1111 else:
1114 else:
1112 added.append(fn)
1115 added.append(fn)
1113 removed = mf1.keys()
1116 removed = mf1.keys()
1114
1117
1115 r = modified, added, removed, deleted, unknown, ignored, clean
1118 r = modified, added, removed, deleted, unknown, ignored, clean
1116 [l.sort() for l in r]
1119 [l.sort() for l in r]
1117 return r
1120 return r
1118
1121
1119 def heads(self, start=None):
1122 def heads(self, start=None):
1120 heads = self.changelog.heads(start)
1123 heads = self.changelog.heads(start)
1121 # sort the output in rev descending order
1124 # sort the output in rev descending order
1122 heads = [(-self.changelog.rev(h), h) for h in heads]
1125 heads = [(-self.changelog.rev(h), h) for h in heads]
1123 return [n for (r, n) in sorted(heads)]
1126 return [n for (r, n) in sorted(heads)]
1124
1127
1125 def branchheads(self, branch=None, start=None, closed=False):
1128 def branchheads(self, branch=None, start=None, closed=False):
1126 '''return a (possibly filtered) list of heads for the given branch
1129 '''return a (possibly filtered) list of heads for the given branch
1127
1130
1128 Heads are returned in topological order, from newest to oldest.
1131 Heads are returned in topological order, from newest to oldest.
1129 If branch is None, use the dirstate branch.
1132 If branch is None, use the dirstate branch.
1130 If start is not None, return only heads reachable from start.
1133 If start is not None, return only heads reachable from start.
1131 If closed is True, return heads that are marked as closed as well.
1134 If closed is True, return heads that are marked as closed as well.
1132 '''
1135 '''
1133 if branch is None:
1136 if branch is None:
1134 branch = self[None].branch()
1137 branch = self[None].branch()
1135 branches = self.branchmap()
1138 branches = self.branchmap()
1136 if branch not in branches:
1139 if branch not in branches:
1137 return []
1140 return []
1138 # the cache returns heads ordered lowest to highest
1141 # the cache returns heads ordered lowest to highest
1139 bheads = list(reversed(branches[branch]))
1142 bheads = list(reversed(branches[branch]))
1140 if start is not None:
1143 if start is not None:
1141 # filter out the heads that cannot be reached from startrev
1144 # filter out the heads that cannot be reached from startrev
1142 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1145 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1143 bheads = [h for h in bheads if h in fbheads]
1146 bheads = [h for h in bheads if h in fbheads]
1144 if not closed:
1147 if not closed:
1145 bheads = [h for h in bheads if
1148 bheads = [h for h in bheads if
1146 ('close' not in self.changelog.read(h)[5])]
1149 ('close' not in self.changelog.read(h)[5])]
1147 return bheads
1150 return bheads
1148
1151
1149 def branches(self, nodes):
1152 def branches(self, nodes):
1150 if not nodes:
1153 if not nodes:
1151 nodes = [self.changelog.tip()]
1154 nodes = [self.changelog.tip()]
1152 b = []
1155 b = []
1153 for n in nodes:
1156 for n in nodes:
1154 t = n
1157 t = n
1155 while 1:
1158 while 1:
1156 p = self.changelog.parents(n)
1159 p = self.changelog.parents(n)
1157 if p[1] != nullid or p[0] == nullid:
1160 if p[1] != nullid or p[0] == nullid:
1158 b.append((t, n, p[0], p[1]))
1161 b.append((t, n, p[0], p[1]))
1159 break
1162 break
1160 n = p[0]
1163 n = p[0]
1161 return b
1164 return b
1162
1165
1163 def between(self, pairs):
1166 def between(self, pairs):
1164 r = []
1167 r = []
1165
1168
1166 for top, bottom in pairs:
1169 for top, bottom in pairs:
1167 n, l, i = top, [], 0
1170 n, l, i = top, [], 0
1168 f = 1
1171 f = 1
1169
1172
1170 while n != bottom and n != nullid:
1173 while n != bottom and n != nullid:
1171 p = self.changelog.parents(n)[0]
1174 p = self.changelog.parents(n)[0]
1172 if i == f:
1175 if i == f:
1173 l.append(n)
1176 l.append(n)
1174 f = f * 2
1177 f = f * 2
1175 n = p
1178 n = p
1176 i += 1
1179 i += 1
1177
1180
1178 r.append(l)
1181 r.append(l)
1179
1182
1180 return r
1183 return r
1181
1184
1182 def pull(self, remote, heads=None, force=False):
1185 def pull(self, remote, heads=None, force=False):
1183 lock = self.lock()
1186 lock = self.lock()
1184 try:
1187 try:
1185 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1188 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1186 force=force)
1189 force=force)
1187 common, fetch, rheads = tmp
1190 common, fetch, rheads = tmp
1188 if not fetch:
1191 if not fetch:
1189 self.ui.status(_("no changes found\n"))
1192 self.ui.status(_("no changes found\n"))
1190 return 0
1193 return 0
1191
1194
1192 if fetch == [nullid]:
1195 if fetch == [nullid]:
1193 self.ui.status(_("requesting all changes\n"))
1196 self.ui.status(_("requesting all changes\n"))
1194 elif heads is None and remote.capable('changegroupsubset'):
1197 elif heads is None and remote.capable('changegroupsubset'):
1195 # issue1320, avoid a race if remote changed after discovery
1198 # issue1320, avoid a race if remote changed after discovery
1196 heads = rheads
1199 heads = rheads
1197
1200
1198 if heads is None:
1201 if heads is None:
1199 cg = remote.changegroup(fetch, 'pull')
1202 cg = remote.changegroup(fetch, 'pull')
1200 else:
1203 else:
1201 if not remote.capable('changegroupsubset'):
1204 if not remote.capable('changegroupsubset'):
1202 raise util.Abort(_("Partial pull cannot be done because "
1205 raise util.Abort(_("Partial pull cannot be done because "
1203 "other repository doesn't support "
1206 "other repository doesn't support "
1204 "changegroupsubset."))
1207 "changegroupsubset."))
1205 cg = remote.changegroupsubset(fetch, heads, 'pull')
1208 cg = remote.changegroupsubset(fetch, heads, 'pull')
1206 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1209 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1207 finally:
1210 finally:
1208 lock.release()
1211 lock.release()
1209
1212
1210 def push(self, remote, force=False, revs=None, newbranch=False):
1213 def push(self, remote, force=False, revs=None, newbranch=False):
1211 '''Push outgoing changesets (limited by revs) from the current
1214 '''Push outgoing changesets (limited by revs) from the current
1212 repository to remote. Return an integer:
1215 repository to remote. Return an integer:
1213 - 0 means HTTP error *or* nothing to push
1216 - 0 means HTTP error *or* nothing to push
1214 - 1 means we pushed and remote head count is unchanged *or*
1217 - 1 means we pushed and remote head count is unchanged *or*
1215 we have outgoing changesets but refused to push
1218 we have outgoing changesets but refused to push
1216 - other values as described by addchangegroup()
1219 - other values as described by addchangegroup()
1217 '''
1220 '''
1218 # there are two ways to push to remote repo:
1221 # there are two ways to push to remote repo:
1219 #
1222 #
1220 # addchangegroup assumes local user can lock remote
1223 # addchangegroup assumes local user can lock remote
1221 # repo (local filesystem, old ssh servers).
1224 # repo (local filesystem, old ssh servers).
1222 #
1225 #
1223 # unbundle assumes local user cannot lock remote repo (new ssh
1226 # unbundle assumes local user cannot lock remote repo (new ssh
1224 # servers, http servers).
1227 # servers, http servers).
1225
1228
1226 lock = None
1229 lock = None
1227 unbundle = remote.capable('unbundle')
1230 unbundle = remote.capable('unbundle')
1228 if not unbundle:
1231 if not unbundle:
1229 lock = remote.lock()
1232 lock = remote.lock()
1230 try:
1233 try:
1231 ret = discovery.prepush(self, remote, force, revs, newbranch)
1234 ret = discovery.prepush(self, remote, force, revs, newbranch)
1232 if ret[0] is None:
1235 if ret[0] is None:
1233 # and here we return 0 for "nothing to push" or 1 for
1236 # and here we return 0 for "nothing to push" or 1 for
1234 # "something to push but I refuse"
1237 # "something to push but I refuse"
1235 return ret[1]
1238 return ret[1]
1236
1239
1237 cg, remote_heads = ret
1240 cg, remote_heads = ret
1238 if unbundle:
1241 if unbundle:
1239 # local repo finds heads on server, finds out what revs it must
1242 # local repo finds heads on server, finds out what revs it must
1240 # push. once revs transferred, if server finds it has
1243 # push. once revs transferred, if server finds it has
1241 # different heads (someone else won commit/push race), server
1244 # different heads (someone else won commit/push race), server
1242 # aborts.
1245 # aborts.
1243 if force:
1246 if force:
1244 remote_heads = ['force']
1247 remote_heads = ['force']
1245 # ssh: return remote's addchangegroup()
1248 # ssh: return remote's addchangegroup()
1246 # http: return remote's addchangegroup() or 0 for error
1249 # http: return remote's addchangegroup() or 0 for error
1247 return remote.unbundle(cg, remote_heads, 'push')
1250 return remote.unbundle(cg, remote_heads, 'push')
1248 else:
1251 else:
1249 # we return an integer indicating remote head count change
1252 # we return an integer indicating remote head count change
1250 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1253 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1251 finally:
1254 finally:
1252 if lock is not None:
1255 if lock is not None:
1253 lock.release()
1256 lock.release()
1254
1257
1255 def changegroupinfo(self, nodes, source):
1258 def changegroupinfo(self, nodes, source):
1256 if self.ui.verbose or source == 'bundle':
1259 if self.ui.verbose or source == 'bundle':
1257 self.ui.status(_("%d changesets found\n") % len(nodes))
1260 self.ui.status(_("%d changesets found\n") % len(nodes))
1258 if self.ui.debugflag:
1261 if self.ui.debugflag:
1259 self.ui.debug("list of changesets:\n")
1262 self.ui.debug("list of changesets:\n")
1260 for node in nodes:
1263 for node in nodes:
1261 self.ui.debug("%s\n" % hex(node))
1264 self.ui.debug("%s\n" % hex(node))
1262
1265
1263 def changegroupsubset(self, bases, heads, source, extranodes=None):
1266 def changegroupsubset(self, bases, heads, source, extranodes=None):
1264 """Compute a changegroup consisting of all the nodes that are
1267 """Compute a changegroup consisting of all the nodes that are
1265 descendents of any of the bases and ancestors of any of the heads.
1268 descendents of any of the bases and ancestors of any of the heads.
1266 Return a chunkbuffer object whose read() method will return
1269 Return a chunkbuffer object whose read() method will return
1267 successive changegroup chunks.
1270 successive changegroup chunks.
1268
1271
1269 It is fairly complex as determining which filenodes and which
1272 It is fairly complex as determining which filenodes and which
1270 manifest nodes need to be included for the changeset to be complete
1273 manifest nodes need to be included for the changeset to be complete
1271 is non-trivial.
1274 is non-trivial.
1272
1275
1273 Another wrinkle is doing the reverse, figuring out which changeset in
1276 Another wrinkle is doing the reverse, figuring out which changeset in
1274 the changegroup a particular filenode or manifestnode belongs to.
1277 the changegroup a particular filenode or manifestnode belongs to.
1275
1278
1276 The caller can specify some nodes that must be included in the
1279 The caller can specify some nodes that must be included in the
1277 changegroup using the extranodes argument. It should be a dict
1280 changegroup using the extranodes argument. It should be a dict
1278 where the keys are the filenames (or 1 for the manifest), and the
1281 where the keys are the filenames (or 1 for the manifest), and the
1279 values are lists of (node, linknode) tuples, where node is a wanted
1282 values are lists of (node, linknode) tuples, where node is a wanted
1280 node and linknode is the changelog node that should be transmitted as
1283 node and linknode is the changelog node that should be transmitted as
1281 the linkrev.
1284 the linkrev.
1282 """
1285 """
1283
1286
1284 # Set up some initial variables
1287 # Set up some initial variables
1285 # Make it easy to refer to self.changelog
1288 # Make it easy to refer to self.changelog
1286 cl = self.changelog
1289 cl = self.changelog
1287 # Compute the list of changesets in this changegroup.
1290 # Compute the list of changesets in this changegroup.
1288 # Some bases may turn out to be superfluous, and some heads may be
1291 # Some bases may turn out to be superfluous, and some heads may be
1289 # too. nodesbetween will return the minimal set of bases and heads
1292 # too. nodesbetween will return the minimal set of bases and heads
1290 # necessary to re-create the changegroup.
1293 # necessary to re-create the changegroup.
1291 if not bases:
1294 if not bases:
1292 bases = [nullid]
1295 bases = [nullid]
1293 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1296 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1294
1297
1295 if extranodes is None:
1298 if extranodes is None:
1296 # can we go through the fast path ?
1299 # can we go through the fast path ?
1297 heads.sort()
1300 heads.sort()
1298 allheads = self.heads()
1301 allheads = self.heads()
1299 allheads.sort()
1302 allheads.sort()
1300 if heads == allheads:
1303 if heads == allheads:
1301 return self._changegroup(msng_cl_lst, source)
1304 return self._changegroup(msng_cl_lst, source)
1302
1305
1303 # slow path
1306 # slow path
1304 self.hook('preoutgoing', throw=True, source=source)
1307 self.hook('preoutgoing', throw=True, source=source)
1305
1308
1306 self.changegroupinfo(msng_cl_lst, source)
1309 self.changegroupinfo(msng_cl_lst, source)
1307
1310
1308 # We assume that all ancestors of bases are known
1311 # We assume that all ancestors of bases are known
1309 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1312 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1310
1313
1311 # Make it easy to refer to self.manifest
1314 # Make it easy to refer to self.manifest
1312 mnfst = self.manifest
1315 mnfst = self.manifest
1313 # We don't know which manifests are missing yet
1316 # We don't know which manifests are missing yet
1314 msng_mnfst_set = {}
1317 msng_mnfst_set = {}
1315 # Nor do we know which filenodes are missing.
1318 # Nor do we know which filenodes are missing.
1316 msng_filenode_set = {}
1319 msng_filenode_set = {}
1317
1320
1318 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1321 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1319 junk = None
1322 junk = None
1320
1323
1321 # A changeset always belongs to itself, so the changenode lookup
1324 # A changeset always belongs to itself, so the changenode lookup
1322 # function for a changenode is identity.
1325 # function for a changenode is identity.
1323 def identity(x):
1326 def identity(x):
1324 return x
1327 return x
1325
1328
1326 # A function generating function that sets up the initial environment
1329 # A function generating function that sets up the initial environment
1327 # the inner function.
1330 # the inner function.
1328 def filenode_collector(changedfiles):
1331 def filenode_collector(changedfiles):
1329 # This gathers information from each manifestnode included in the
1332 # This gathers information from each manifestnode included in the
1330 # changegroup about which filenodes the manifest node references
1333 # changegroup about which filenodes the manifest node references
1331 # so we can include those in the changegroup too.
1334 # so we can include those in the changegroup too.
1332 #
1335 #
1333 # It also remembers which changenode each filenode belongs to. It
1336 # It also remembers which changenode each filenode belongs to. It
1334 # does this by assuming the a filenode belongs to the changenode
1337 # does this by assuming the a filenode belongs to the changenode
1335 # the first manifest that references it belongs to.
1338 # the first manifest that references it belongs to.
1336 def collect_msng_filenodes(mnfstnode):
1339 def collect_msng_filenodes(mnfstnode):
1337 r = mnfst.rev(mnfstnode)
1340 r = mnfst.rev(mnfstnode)
1338 if r - 1 in mnfst.parentrevs(r):
1341 if r - 1 in mnfst.parentrevs(r):
1339 # If the previous rev is one of the parents,
1342 # If the previous rev is one of the parents,
1340 # we only need to see a diff.
1343 # we only need to see a diff.
1341 deltamf = mnfst.readdelta(mnfstnode)
1344 deltamf = mnfst.readdelta(mnfstnode)
1342 # For each line in the delta
1345 # For each line in the delta
1343 for f, fnode in deltamf.iteritems():
1346 for f, fnode in deltamf.iteritems():
1344 # And if the file is in the list of files we care
1347 # And if the file is in the list of files we care
1345 # about.
1348 # about.
1346 if f in changedfiles:
1349 if f in changedfiles:
1347 # Get the changenode this manifest belongs to
1350 # Get the changenode this manifest belongs to
1348 clnode = msng_mnfst_set[mnfstnode]
1351 clnode = msng_mnfst_set[mnfstnode]
1349 # Create the set of filenodes for the file if
1352 # Create the set of filenodes for the file if
1350 # there isn't one already.
1353 # there isn't one already.
1351 ndset = msng_filenode_set.setdefault(f, {})
1354 ndset = msng_filenode_set.setdefault(f, {})
1352 # And set the filenode's changelog node to the
1355 # And set the filenode's changelog node to the
1353 # manifest's if it hasn't been set already.
1356 # manifest's if it hasn't been set already.
1354 ndset.setdefault(fnode, clnode)
1357 ndset.setdefault(fnode, clnode)
1355 else:
1358 else:
1356 # Otherwise we need a full manifest.
1359 # Otherwise we need a full manifest.
1357 m = mnfst.read(mnfstnode)
1360 m = mnfst.read(mnfstnode)
1358 # For every file in we care about.
1361 # For every file in we care about.
1359 for f in changedfiles:
1362 for f in changedfiles:
1360 fnode = m.get(f, None)
1363 fnode = m.get(f, None)
1361 # If it's in the manifest
1364 # If it's in the manifest
1362 if fnode is not None:
1365 if fnode is not None:
1363 # See comments above.
1366 # See comments above.
1364 clnode = msng_mnfst_set[mnfstnode]
1367 clnode = msng_mnfst_set[mnfstnode]
1365 ndset = msng_filenode_set.setdefault(f, {})
1368 ndset = msng_filenode_set.setdefault(f, {})
1366 ndset.setdefault(fnode, clnode)
1369 ndset.setdefault(fnode, clnode)
1367 return collect_msng_filenodes
1370 return collect_msng_filenodes
1368
1371
1369 # If we determine that a particular file or manifest node must be a
1372 # If we determine that a particular file or manifest node must be a
1370 # node that the recipient of the changegroup will already have, we can
1373 # node that the recipient of the changegroup will already have, we can
1371 # also assume the recipient will have all the parents. This function
1374 # also assume the recipient will have all the parents. This function
1372 # prunes them from the set of missing nodes.
1375 # prunes them from the set of missing nodes.
1373 def prune(revlog, missingnodes):
1376 def prune(revlog, missingnodes):
1374 hasset = set()
1377 hasset = set()
1375 # If a 'missing' filenode thinks it belongs to a changenode we
1378 # If a 'missing' filenode thinks it belongs to a changenode we
1376 # assume the recipient must have, then the recipient must have
1379 # assume the recipient must have, then the recipient must have
1377 # that filenode.
1380 # that filenode.
1378 for n in missingnodes:
1381 for n in missingnodes:
1379 clrev = revlog.linkrev(revlog.rev(n))
1382 clrev = revlog.linkrev(revlog.rev(n))
1380 if clrev in commonrevs:
1383 if clrev in commonrevs:
1381 hasset.add(n)
1384 hasset.add(n)
1382 for n in hasset:
1385 for n in hasset:
1383 missingnodes.pop(n, None)
1386 missingnodes.pop(n, None)
1384 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1387 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1385 missingnodes.pop(revlog.node(r), None)
1388 missingnodes.pop(revlog.node(r), None)
1386
1389
1387 # Add the nodes that were explicitly requested.
1390 # Add the nodes that were explicitly requested.
1388 def add_extra_nodes(name, nodes):
1391 def add_extra_nodes(name, nodes):
1389 if not extranodes or name not in extranodes:
1392 if not extranodes or name not in extranodes:
1390 return
1393 return
1391
1394
1392 for node, linknode in extranodes[name]:
1395 for node, linknode in extranodes[name]:
1393 if node not in nodes:
1396 if node not in nodes:
1394 nodes[node] = linknode
1397 nodes[node] = linknode
1395
1398
1396 # Now that we have all theses utility functions to help out and
1399 # Now that we have all theses utility functions to help out and
1397 # logically divide up the task, generate the group.
1400 # logically divide up the task, generate the group.
1398 def gengroup():
1401 def gengroup():
1399 # The set of changed files starts empty.
1402 # The set of changed files starts empty.
1400 changedfiles = set()
1403 changedfiles = set()
1401 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1404 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1402
1405
1403 # Create a changenode group generator that will call our functions
1406 # Create a changenode group generator that will call our functions
1404 # back to lookup the owning changenode and collect information.
1407 # back to lookup the owning changenode and collect information.
1405 group = cl.group(msng_cl_lst, identity, collect)
1408 group = cl.group(msng_cl_lst, identity, collect)
1406 for cnt, chnk in enumerate(group):
1409 for cnt, chnk in enumerate(group):
1407 yield chnk
1410 yield chnk
1408 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1411 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1409 self.ui.progress(_('bundling changes'), None)
1412 self.ui.progress(_('bundling changes'), None)
1410
1413
1411 prune(mnfst, msng_mnfst_set)
1414 prune(mnfst, msng_mnfst_set)
1412 add_extra_nodes(1, msng_mnfst_set)
1415 add_extra_nodes(1, msng_mnfst_set)
1413 msng_mnfst_lst = msng_mnfst_set.keys()
1416 msng_mnfst_lst = msng_mnfst_set.keys()
1414 # Sort the manifestnodes by revision number.
1417 # Sort the manifestnodes by revision number.
1415 msng_mnfst_lst.sort(key=mnfst.rev)
1418 msng_mnfst_lst.sort(key=mnfst.rev)
1416 # Create a generator for the manifestnodes that calls our lookup
1419 # Create a generator for the manifestnodes that calls our lookup
1417 # and data collection functions back.
1420 # and data collection functions back.
1418 group = mnfst.group(msng_mnfst_lst,
1421 group = mnfst.group(msng_mnfst_lst,
1419 lambda mnode: msng_mnfst_set[mnode],
1422 lambda mnode: msng_mnfst_set[mnode],
1420 filenode_collector(changedfiles))
1423 filenode_collector(changedfiles))
1421 for cnt, chnk in enumerate(group):
1424 for cnt, chnk in enumerate(group):
1422 yield chnk
1425 yield chnk
1423 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1426 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1424 self.ui.progress(_('bundling manifests'), None)
1427 self.ui.progress(_('bundling manifests'), None)
1425
1428
1426 # These are no longer needed, dereference and toss the memory for
1429 # These are no longer needed, dereference and toss the memory for
1427 # them.
1430 # them.
1428 msng_mnfst_lst = None
1431 msng_mnfst_lst = None
1429 msng_mnfst_set.clear()
1432 msng_mnfst_set.clear()
1430
1433
1431 if extranodes:
1434 if extranodes:
1432 for fname in extranodes:
1435 for fname in extranodes:
1433 if isinstance(fname, int):
1436 if isinstance(fname, int):
1434 continue
1437 continue
1435 msng_filenode_set.setdefault(fname, {})
1438 msng_filenode_set.setdefault(fname, {})
1436 changedfiles.add(fname)
1439 changedfiles.add(fname)
1437 # Go through all our files in order sorted by name.
1440 # Go through all our files in order sorted by name.
1438 cnt = 0
1441 cnt = 0
1439 for fname in sorted(changedfiles):
1442 for fname in sorted(changedfiles):
1440 filerevlog = self.file(fname)
1443 filerevlog = self.file(fname)
1441 if not len(filerevlog):
1444 if not len(filerevlog):
1442 raise util.Abort(_("empty or missing revlog for %s") % fname)
1445 raise util.Abort(_("empty or missing revlog for %s") % fname)
1443 # Toss out the filenodes that the recipient isn't really
1446 # Toss out the filenodes that the recipient isn't really
1444 # missing.
1447 # missing.
1445 missingfnodes = msng_filenode_set.pop(fname, {})
1448 missingfnodes = msng_filenode_set.pop(fname, {})
1446 prune(filerevlog, missingfnodes)
1449 prune(filerevlog, missingfnodes)
1447 add_extra_nodes(fname, missingfnodes)
1450 add_extra_nodes(fname, missingfnodes)
1448 # If any filenodes are left, generate the group for them,
1451 # If any filenodes are left, generate the group for them,
1449 # otherwise don't bother.
1452 # otherwise don't bother.
1450 if missingfnodes:
1453 if missingfnodes:
1451 yield changegroup.chunkheader(len(fname))
1454 yield changegroup.chunkheader(len(fname))
1452 yield fname
1455 yield fname
1453 # Sort the filenodes by their revision # (topological order)
1456 # Sort the filenodes by their revision # (topological order)
1454 nodeiter = list(missingfnodes)
1457 nodeiter = list(missingfnodes)
1455 nodeiter.sort(key=filerevlog.rev)
1458 nodeiter.sort(key=filerevlog.rev)
1456 # Create a group generator and only pass in a changenode
1459 # Create a group generator and only pass in a changenode
1457 # lookup function as we need to collect no information
1460 # lookup function as we need to collect no information
1458 # from filenodes.
1461 # from filenodes.
1459 group = filerevlog.group(nodeiter,
1462 group = filerevlog.group(nodeiter,
1460 lambda fnode: missingfnodes[fnode])
1463 lambda fnode: missingfnodes[fnode])
1461 for chnk in group:
1464 for chnk in group:
1462 self.ui.progress(
1465 self.ui.progress(
1463 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1466 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1464 cnt += 1
1467 cnt += 1
1465 yield chnk
1468 yield chnk
1466 # Signal that no more groups are left.
1469 # Signal that no more groups are left.
1467 yield changegroup.closechunk()
1470 yield changegroup.closechunk()
1468 self.ui.progress(_('bundling files'), None)
1471 self.ui.progress(_('bundling files'), None)
1469
1472
1470 if msng_cl_lst:
1473 if msng_cl_lst:
1471 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1474 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1472
1475
1473 return util.chunkbuffer(gengroup())
1476 return util.chunkbuffer(gengroup())
1474
1477
1475 def changegroup(self, basenodes, source):
1478 def changegroup(self, basenodes, source):
1476 # to avoid a race we use changegroupsubset() (issue1320)
1479 # to avoid a race we use changegroupsubset() (issue1320)
1477 return self.changegroupsubset(basenodes, self.heads(), source)
1480 return self.changegroupsubset(basenodes, self.heads(), source)
1478
1481
1479 def _changegroup(self, nodes, source):
1482 def _changegroup(self, nodes, source):
1480 """Compute the changegroup of all nodes that we have that a recipient
1483 """Compute the changegroup of all nodes that we have that a recipient
1481 doesn't. Return a chunkbuffer object whose read() method will return
1484 doesn't. Return a chunkbuffer object whose read() method will return
1482 successive changegroup chunks.
1485 successive changegroup chunks.
1483
1486
1484 This is much easier than the previous function as we can assume that
1487 This is much easier than the previous function as we can assume that
1485 the recipient has any changenode we aren't sending them.
1488 the recipient has any changenode we aren't sending them.
1486
1489
1487 nodes is the set of nodes to send"""
1490 nodes is the set of nodes to send"""
1488
1491
1489 self.hook('preoutgoing', throw=True, source=source)
1492 self.hook('preoutgoing', throw=True, source=source)
1490
1493
1491 cl = self.changelog
1494 cl = self.changelog
1492 revset = set([cl.rev(n) for n in nodes])
1495 revset = set([cl.rev(n) for n in nodes])
1493 self.changegroupinfo(nodes, source)
1496 self.changegroupinfo(nodes, source)
1494
1497
1495 def identity(x):
1498 def identity(x):
1496 return x
1499 return x
1497
1500
1498 def gennodelst(log):
1501 def gennodelst(log):
1499 for r in log:
1502 for r in log:
1500 if log.linkrev(r) in revset:
1503 if log.linkrev(r) in revset:
1501 yield log.node(r)
1504 yield log.node(r)
1502
1505
1503 def lookuplinkrev_func(revlog):
1506 def lookuplinkrev_func(revlog):
1504 def lookuplinkrev(n):
1507 def lookuplinkrev(n):
1505 return cl.node(revlog.linkrev(revlog.rev(n)))
1508 return cl.node(revlog.linkrev(revlog.rev(n)))
1506 return lookuplinkrev
1509 return lookuplinkrev
1507
1510
1508 def gengroup():
1511 def gengroup():
1509 '''yield a sequence of changegroup chunks (strings)'''
1512 '''yield a sequence of changegroup chunks (strings)'''
1510 # construct a list of all changed files
1513 # construct a list of all changed files
1511 changedfiles = set()
1514 changedfiles = set()
1512 mmfs = {}
1515 mmfs = {}
1513 collect = changegroup.collector(cl, mmfs, changedfiles)
1516 collect = changegroup.collector(cl, mmfs, changedfiles)
1514
1517
1515 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1518 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1516 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1519 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1517 yield chnk
1520 yield chnk
1518 self.ui.progress(_('bundling changes'), None)
1521 self.ui.progress(_('bundling changes'), None)
1519
1522
1520 mnfst = self.manifest
1523 mnfst = self.manifest
1521 nodeiter = gennodelst(mnfst)
1524 nodeiter = gennodelst(mnfst)
1522 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1525 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1523 lookuplinkrev_func(mnfst))):
1526 lookuplinkrev_func(mnfst))):
1524 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1527 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1525 yield chnk
1528 yield chnk
1526 self.ui.progress(_('bundling manifests'), None)
1529 self.ui.progress(_('bundling manifests'), None)
1527
1530
1528 cnt = 0
1531 cnt = 0
1529 for fname in sorted(changedfiles):
1532 for fname in sorted(changedfiles):
1530 filerevlog = self.file(fname)
1533 filerevlog = self.file(fname)
1531 if not len(filerevlog):
1534 if not len(filerevlog):
1532 raise util.Abort(_("empty or missing revlog for %s") % fname)
1535 raise util.Abort(_("empty or missing revlog for %s") % fname)
1533 nodeiter = gennodelst(filerevlog)
1536 nodeiter = gennodelst(filerevlog)
1534 nodeiter = list(nodeiter)
1537 nodeiter = list(nodeiter)
1535 if nodeiter:
1538 if nodeiter:
1536 yield changegroup.chunkheader(len(fname))
1539 yield changegroup.chunkheader(len(fname))
1537 yield fname
1540 yield fname
1538 lookup = lookuplinkrev_func(filerevlog)
1541 lookup = lookuplinkrev_func(filerevlog)
1539 for chnk in filerevlog.group(nodeiter, lookup):
1542 for chnk in filerevlog.group(nodeiter, lookup):
1540 self.ui.progress(
1543 self.ui.progress(
1541 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1544 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1542 cnt += 1
1545 cnt += 1
1543 yield chnk
1546 yield chnk
1544 self.ui.progress(_('bundling files'), None)
1547 self.ui.progress(_('bundling files'), None)
1545
1548
1546 yield changegroup.closechunk()
1549 yield changegroup.closechunk()
1547
1550
1548 if nodes:
1551 if nodes:
1549 self.hook('outgoing', node=hex(nodes[0]), source=source)
1552 self.hook('outgoing', node=hex(nodes[0]), source=source)
1550
1553
1551 return util.chunkbuffer(gengroup())
1554 return util.chunkbuffer(gengroup())
1552
1555
1553 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1556 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1554 """Add the changegroup returned by source.read() to this repo.
1557 """Add the changegroup returned by source.read() to this repo.
1555 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1558 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1556 the URL of the repo where this changegroup is coming from.
1559 the URL of the repo where this changegroup is coming from.
1557
1560
1558 Return an integer summarizing the change to this repo:
1561 Return an integer summarizing the change to this repo:
1559 - nothing changed or no source: 0
1562 - nothing changed or no source: 0
1560 - more heads than before: 1+added heads (2..n)
1563 - more heads than before: 1+added heads (2..n)
1561 - fewer heads than before: -1-removed heads (-2..-n)
1564 - fewer heads than before: -1-removed heads (-2..-n)
1562 - number of heads stays the same: 1
1565 - number of heads stays the same: 1
1563 """
1566 """
1564 def csmap(x):
1567 def csmap(x):
1565 self.ui.debug("add changeset %s\n" % short(x))
1568 self.ui.debug("add changeset %s\n" % short(x))
1566 return len(cl)
1569 return len(cl)
1567
1570
1568 def revmap(x):
1571 def revmap(x):
1569 return cl.rev(x)
1572 return cl.rev(x)
1570
1573
1571 if not source:
1574 if not source:
1572 return 0
1575 return 0
1573
1576
1574 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1577 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1575
1578
1576 changesets = files = revisions = 0
1579 changesets = files = revisions = 0
1577 efiles = set()
1580 efiles = set()
1578
1581
1579 # write changelog data to temp files so concurrent readers will not see
1582 # write changelog data to temp files so concurrent readers will not see
1580 # inconsistent view
1583 # inconsistent view
1581 cl = self.changelog
1584 cl = self.changelog
1582 cl.delayupdate()
1585 cl.delayupdate()
1583 oldheads = len(cl.heads())
1586 oldheads = len(cl.heads())
1584
1587
1585 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1588 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1586 try:
1589 try:
1587 trp = weakref.proxy(tr)
1590 trp = weakref.proxy(tr)
1588 # pull off the changeset group
1591 # pull off the changeset group
1589 self.ui.status(_("adding changesets\n"))
1592 self.ui.status(_("adding changesets\n"))
1590 clstart = len(cl)
1593 clstart = len(cl)
1591 class prog(object):
1594 class prog(object):
1592 step = _('changesets')
1595 step = _('changesets')
1593 count = 1
1596 count = 1
1594 ui = self.ui
1597 ui = self.ui
1595 total = None
1598 total = None
1596 def __call__(self):
1599 def __call__(self):
1597 self.ui.progress(self.step, self.count, unit=_('chunks'),
1600 self.ui.progress(self.step, self.count, unit=_('chunks'),
1598 total=self.total)
1601 total=self.total)
1599 self.count += 1
1602 self.count += 1
1600 pr = prog()
1603 pr = prog()
1601 chunkiter = changegroup.chunkiter(source, progress=pr)
1604 chunkiter = changegroup.chunkiter(source, progress=pr)
1602 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1605 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
1603 raise util.Abort(_("received changelog group is empty"))
1606 raise util.Abort(_("received changelog group is empty"))
1604 clend = len(cl)
1607 clend = len(cl)
1605 changesets = clend - clstart
1608 changesets = clend - clstart
1606 for c in xrange(clstart, clend):
1609 for c in xrange(clstart, clend):
1607 efiles.update(self[c].files())
1610 efiles.update(self[c].files())
1608 efiles = len(efiles)
1611 efiles = len(efiles)
1609 self.ui.progress(_('changesets'), None)
1612 self.ui.progress(_('changesets'), None)
1610
1613
1611 # pull off the manifest group
1614 # pull off the manifest group
1612 self.ui.status(_("adding manifests\n"))
1615 self.ui.status(_("adding manifests\n"))
1613 pr.step = _('manifests')
1616 pr.step = _('manifests')
1614 pr.count = 1
1617 pr.count = 1
1615 pr.total = changesets # manifests <= changesets
1618 pr.total = changesets # manifests <= changesets
1616 chunkiter = changegroup.chunkiter(source, progress=pr)
1619 chunkiter = changegroup.chunkiter(source, progress=pr)
1617 # no need to check for empty manifest group here:
1620 # no need to check for empty manifest group here:
1618 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1621 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1619 # no new manifest will be created and the manifest group will
1622 # no new manifest will be created and the manifest group will
1620 # be empty during the pull
1623 # be empty during the pull
1621 self.manifest.addgroup(chunkiter, revmap, trp)
1624 self.manifest.addgroup(chunkiter, revmap, trp)
1622 self.ui.progress(_('manifests'), None)
1625 self.ui.progress(_('manifests'), None)
1623
1626
1624 needfiles = {}
1627 needfiles = {}
1625 if self.ui.configbool('server', 'validate', default=False):
1628 if self.ui.configbool('server', 'validate', default=False):
1626 # validate incoming csets have their manifests
1629 # validate incoming csets have their manifests
1627 for cset in xrange(clstart, clend):
1630 for cset in xrange(clstart, clend):
1628 mfest = self.changelog.read(self.changelog.node(cset))[0]
1631 mfest = self.changelog.read(self.changelog.node(cset))[0]
1629 mfest = self.manifest.readdelta(mfest)
1632 mfest = self.manifest.readdelta(mfest)
1630 # store file nodes we must see
1633 # store file nodes we must see
1631 for f, n in mfest.iteritems():
1634 for f, n in mfest.iteritems():
1632 needfiles.setdefault(f, set()).add(n)
1635 needfiles.setdefault(f, set()).add(n)
1633
1636
1634 # process the files
1637 # process the files
1635 self.ui.status(_("adding file changes\n"))
1638 self.ui.status(_("adding file changes\n"))
1636 pr.step = 'files'
1639 pr.step = 'files'
1637 pr.count = 1
1640 pr.count = 1
1638 pr.total = efiles
1641 pr.total = efiles
1639 while 1:
1642 while 1:
1640 f = changegroup.getchunk(source)
1643 f = changegroup.getchunk(source)
1641 if not f:
1644 if not f:
1642 break
1645 break
1643 self.ui.debug("adding %s revisions\n" % f)
1646 self.ui.debug("adding %s revisions\n" % f)
1644 pr()
1647 pr()
1645 fl = self.file(f)
1648 fl = self.file(f)
1646 o = len(fl)
1649 o = len(fl)
1647 chunkiter = changegroup.chunkiter(source)
1650 chunkiter = changegroup.chunkiter(source)
1648 if fl.addgroup(chunkiter, revmap, trp) is None:
1651 if fl.addgroup(chunkiter, revmap, trp) is None:
1649 raise util.Abort(_("received file revlog group is empty"))
1652 raise util.Abort(_("received file revlog group is empty"))
1650 revisions += len(fl) - o
1653 revisions += len(fl) - o
1651 files += 1
1654 files += 1
1652 if f in needfiles:
1655 if f in needfiles:
1653 needs = needfiles[f]
1656 needs = needfiles[f]
1654 for new in xrange(o, len(fl)):
1657 for new in xrange(o, len(fl)):
1655 n = fl.node(new)
1658 n = fl.node(new)
1656 if n in needs:
1659 if n in needs:
1657 needs.remove(n)
1660 needs.remove(n)
1658 if not needs:
1661 if not needs:
1659 del needfiles[f]
1662 del needfiles[f]
1660 self.ui.progress(_('files'), None)
1663 self.ui.progress(_('files'), None)
1661
1664
1662 for f, needs in needfiles.iteritems():
1665 for f, needs in needfiles.iteritems():
1663 fl = self.file(f)
1666 fl = self.file(f)
1664 for n in needs:
1667 for n in needs:
1665 try:
1668 try:
1666 fl.rev(n)
1669 fl.rev(n)
1667 except error.LookupError:
1670 except error.LookupError:
1668 raise util.Abort(
1671 raise util.Abort(
1669 _('missing file data for %s:%s - run hg verify') %
1672 _('missing file data for %s:%s - run hg verify') %
1670 (f, hex(n)))
1673 (f, hex(n)))
1671
1674
1672 newheads = len(cl.heads())
1675 newheads = len(cl.heads())
1673 heads = ""
1676 heads = ""
1674 if oldheads and newheads != oldheads:
1677 if oldheads and newheads != oldheads:
1675 heads = _(" (%+d heads)") % (newheads - oldheads)
1678 heads = _(" (%+d heads)") % (newheads - oldheads)
1676
1679
1677 self.ui.status(_("added %d changesets"
1680 self.ui.status(_("added %d changesets"
1678 " with %d changes to %d files%s\n")
1681 " with %d changes to %d files%s\n")
1679 % (changesets, revisions, files, heads))
1682 % (changesets, revisions, files, heads))
1680
1683
1681 if changesets > 0:
1684 if changesets > 0:
1682 p = lambda: cl.writepending() and self.root or ""
1685 p = lambda: cl.writepending() and self.root or ""
1683 self.hook('pretxnchangegroup', throw=True,
1686 self.hook('pretxnchangegroup', throw=True,
1684 node=hex(cl.node(clstart)), source=srctype,
1687 node=hex(cl.node(clstart)), source=srctype,
1685 url=url, pending=p)
1688 url=url, pending=p)
1686
1689
1687 # make changelog see real files again
1690 # make changelog see real files again
1688 cl.finalize(trp)
1691 cl.finalize(trp)
1689
1692
1690 tr.close()
1693 tr.close()
1691 finally:
1694 finally:
1692 tr.release()
1695 tr.release()
1693 if lock:
1696 if lock:
1694 lock.release()
1697 lock.release()
1695
1698
1696 if changesets > 0:
1699 if changesets > 0:
1697 # forcefully update the on-disk branch cache
1700 # forcefully update the on-disk branch cache
1698 self.ui.debug("updating the branch cache\n")
1701 self.ui.debug("updating the branch cache\n")
1699 self.branchtags()
1702 self.branchtags()
1700 self.hook("changegroup", node=hex(cl.node(clstart)),
1703 self.hook("changegroup", node=hex(cl.node(clstart)),
1701 source=srctype, url=url)
1704 source=srctype, url=url)
1702
1705
1703 for i in xrange(clstart, clend):
1706 for i in xrange(clstart, clend):
1704 self.hook("incoming", node=hex(cl.node(i)),
1707 self.hook("incoming", node=hex(cl.node(i)),
1705 source=srctype, url=url)
1708 source=srctype, url=url)
1706
1709
1707 # never return 0 here:
1710 # never return 0 here:
1708 if newheads < oldheads:
1711 if newheads < oldheads:
1709 return newheads - oldheads - 1
1712 return newheads - oldheads - 1
1710 else:
1713 else:
1711 return newheads - oldheads + 1
1714 return newheads - oldheads + 1
1712
1715
1713
1716
1714 def stream_in(self, remote):
1717 def stream_in(self, remote):
1715 fp = remote.stream_out()
1718 fp = remote.stream_out()
1716 l = fp.readline()
1719 l = fp.readline()
1717 try:
1720 try:
1718 resp = int(l)
1721 resp = int(l)
1719 except ValueError:
1722 except ValueError:
1720 raise error.ResponseError(
1723 raise error.ResponseError(
1721 _('Unexpected response from remote server:'), l)
1724 _('Unexpected response from remote server:'), l)
1722 if resp == 1:
1725 if resp == 1:
1723 raise util.Abort(_('operation forbidden by server'))
1726 raise util.Abort(_('operation forbidden by server'))
1724 elif resp == 2:
1727 elif resp == 2:
1725 raise util.Abort(_('locking the remote repository failed'))
1728 raise util.Abort(_('locking the remote repository failed'))
1726 elif resp != 0:
1729 elif resp != 0:
1727 raise util.Abort(_('the server sent an unknown error code'))
1730 raise util.Abort(_('the server sent an unknown error code'))
1728 self.ui.status(_('streaming all changes\n'))
1731 self.ui.status(_('streaming all changes\n'))
1729 l = fp.readline()
1732 l = fp.readline()
1730 try:
1733 try:
1731 total_files, total_bytes = map(int, l.split(' ', 1))
1734 total_files, total_bytes = map(int, l.split(' ', 1))
1732 except (ValueError, TypeError):
1735 except (ValueError, TypeError):
1733 raise error.ResponseError(
1736 raise error.ResponseError(
1734 _('Unexpected response from remote server:'), l)
1737 _('Unexpected response from remote server:'), l)
1735 self.ui.status(_('%d files to transfer, %s of data\n') %
1738 self.ui.status(_('%d files to transfer, %s of data\n') %
1736 (total_files, util.bytecount(total_bytes)))
1739 (total_files, util.bytecount(total_bytes)))
1737 start = time.time()
1740 start = time.time()
1738 for i in xrange(total_files):
1741 for i in xrange(total_files):
1739 # XXX doesn't support '\n' or '\r' in filenames
1742 # XXX doesn't support '\n' or '\r' in filenames
1740 l = fp.readline()
1743 l = fp.readline()
1741 try:
1744 try:
1742 name, size = l.split('\0', 1)
1745 name, size = l.split('\0', 1)
1743 size = int(size)
1746 size = int(size)
1744 except (ValueError, TypeError):
1747 except (ValueError, TypeError):
1745 raise error.ResponseError(
1748 raise error.ResponseError(
1746 _('Unexpected response from remote server:'), l)
1749 _('Unexpected response from remote server:'), l)
1747 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1750 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1748 # for backwards compat, name was partially encoded
1751 # for backwards compat, name was partially encoded
1749 ofp = self.sopener(store.decodedir(name), 'w')
1752 ofp = self.sopener(store.decodedir(name), 'w')
1750 for chunk in util.filechunkiter(fp, limit=size):
1753 for chunk in util.filechunkiter(fp, limit=size):
1751 ofp.write(chunk)
1754 ofp.write(chunk)
1752 ofp.close()
1755 ofp.close()
1753 elapsed = time.time() - start
1756 elapsed = time.time() - start
1754 if elapsed <= 0:
1757 if elapsed <= 0:
1755 elapsed = 0.001
1758 elapsed = 0.001
1756 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1759 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1757 (util.bytecount(total_bytes), elapsed,
1760 (util.bytecount(total_bytes), elapsed,
1758 util.bytecount(total_bytes / elapsed)))
1761 util.bytecount(total_bytes / elapsed)))
1759 self.invalidate()
1762 self.invalidate()
1760 return len(self.heads()) + 1
1763 return len(self.heads()) + 1
1761
1764
1762 def clone(self, remote, heads=[], stream=False):
1765 def clone(self, remote, heads=[], stream=False):
1763 '''clone remote repository.
1766 '''clone remote repository.
1764
1767
1765 keyword arguments:
1768 keyword arguments:
1766 heads: list of revs to clone (forces use of pull)
1769 heads: list of revs to clone (forces use of pull)
1767 stream: use streaming clone if possible'''
1770 stream: use streaming clone if possible'''
1768
1771
1769 # now, all clients that can request uncompressed clones can
1772 # now, all clients that can request uncompressed clones can
1770 # read repo formats supported by all servers that can serve
1773 # read repo formats supported by all servers that can serve
1771 # them.
1774 # them.
1772
1775
1773 # if revlog format changes, client will have to check version
1776 # if revlog format changes, client will have to check version
1774 # and format flags on "stream" capability, and use
1777 # and format flags on "stream" capability, and use
1775 # uncompressed only if compatible.
1778 # uncompressed only if compatible.
1776
1779
1777 if stream and not heads and remote.capable('stream'):
1780 if stream and not heads and remote.capable('stream'):
1778 return self.stream_in(remote)
1781 return self.stream_in(remote)
1779 return self.pull(remote, heads)
1782 return self.pull(remote, heads)
1780
1783
1781 def pushkey(self, namespace, key, old, new):
1784 def pushkey(self, namespace, key, old, new):
1782 return pushkey.push(self, namespace, key, old, new)
1785 return pushkey.push(self, namespace, key, old, new)
1783
1786
1784 def listkeys(self, namespace):
1787 def listkeys(self, namespace):
1785 return pushkey.list(self, namespace)
1788 return pushkey.list(self, namespace)
1786
1789
1787 # used to avoid circular references so destructors work
1790 # used to avoid circular references so destructors work
1788 def aftertrans(files):
1791 def aftertrans(files):
1789 renamefiles = [tuple(t) for t in files]
1792 renamefiles = [tuple(t) for t in files]
1790 def a():
1793 def a():
1791 for src, dest in renamefiles:
1794 for src, dest in renamefiles:
1792 util.rename(src, dest)
1795 util.rename(src, dest)
1793 return a
1796 return a
1794
1797
1795 def instance(ui, path, create):
1798 def instance(ui, path, create):
1796 return localrepository(ui, util.drop_scheme('file', path), create)
1799 return localrepository(ui, util.drop_scheme('file', path), create)
1797
1800
1798 def islocal(path):
1801 def islocal(path):
1799 return True
1802 return True
@@ -1,527 +1,527 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, hex, bin
8 from node import nullid, nullrev, hex, bin
9 from i18n import _
9 from i18n import _
10 import util, filemerge, copies, subrepo
10 import util, filemerge, copies, subrepo
11 import errno, os, shutil
11 import errno, os, shutil
12
12
13 class mergestate(object):
13 class mergestate(object):
14 '''track 3-way merge state of individual files'''
14 '''track 3-way merge state of individual files'''
15 def __init__(self, repo):
15 def __init__(self, repo):
16 self._repo = repo
16 self._repo = repo
17 self._read()
17 self._read()
18 def reset(self, node=None):
18 def reset(self, node=None):
19 self._state = {}
19 self._state = {}
20 if node:
20 if node:
21 self._local = node
21 self._local = node
22 shutil.rmtree(self._repo.join("merge"), True)
22 shutil.rmtree(self._repo.join("merge"), True)
23 def _read(self):
23 def _read(self):
24 self._state = {}
24 self._state = {}
25 try:
25 try:
26 f = self._repo.opener("merge/state")
26 f = self._repo.opener("merge/state")
27 for i, l in enumerate(f):
27 for i, l in enumerate(f):
28 if i == 0:
28 if i == 0:
29 self._local = bin(l[:-1])
29 self._local = bin(l[:-1])
30 else:
30 else:
31 bits = l[:-1].split("\0")
31 bits = l[:-1].split("\0")
32 self._state[bits[0]] = bits[1:]
32 self._state[bits[0]] = bits[1:]
33 except IOError, err:
33 except IOError, err:
34 if err.errno != errno.ENOENT:
34 if err.errno != errno.ENOENT:
35 raise
35 raise
36 def _write(self):
36 def _write(self):
37 f = self._repo.opener("merge/state", "w")
37 f = self._repo.opener("merge/state", "w")
38 f.write(hex(self._local) + "\n")
38 f.write(hex(self._local) + "\n")
39 for d, v in self._state.iteritems():
39 for d, v in self._state.iteritems():
40 f.write("\0".join([d] + v) + "\n")
40 f.write("\0".join([d] + v) + "\n")
41 def add(self, fcl, fco, fca, fd, flags):
41 def add(self, fcl, fco, fca, fd, flags):
42 hash = util.sha1(fcl.path()).hexdigest()
42 hash = util.sha1(fcl.path()).hexdigest()
43 self._repo.opener("merge/" + hash, "w").write(fcl.data())
43 self._repo.opener("merge/" + hash, "w").write(fcl.data())
44 self._state[fd] = ['u', hash, fcl.path(), fca.path(),
44 self._state[fd] = ['u', hash, fcl.path(), fca.path(),
45 hex(fca.filenode()), fco.path(), flags]
45 hex(fca.filenode()), fco.path(), flags]
46 self._write()
46 self._write()
47 def __contains__(self, dfile):
47 def __contains__(self, dfile):
48 return dfile in self._state
48 return dfile in self._state
49 def __getitem__(self, dfile):
49 def __getitem__(self, dfile):
50 return self._state[dfile][0]
50 return self._state[dfile][0]
51 def __iter__(self):
51 def __iter__(self):
52 l = self._state.keys()
52 l = self._state.keys()
53 l.sort()
53 l.sort()
54 for f in l:
54 for f in l:
55 yield f
55 yield f
56 def mark(self, dfile, state):
56 def mark(self, dfile, state):
57 self._state[dfile][0] = state
57 self._state[dfile][0] = state
58 self._write()
58 self._write()
59 def resolve(self, dfile, wctx, octx):
59 def resolve(self, dfile, wctx, octx):
60 if self[dfile] == 'r':
60 if self[dfile] == 'r':
61 return 0
61 return 0
62 state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
62 state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
63 f = self._repo.opener("merge/" + hash)
63 f = self._repo.opener("merge/" + hash)
64 self._repo.wwrite(dfile, f.read(), flags)
64 self._repo.wwrite(dfile, f.read(), flags)
65 fcd = wctx[dfile]
65 fcd = wctx[dfile]
66 fco = octx[ofile]
66 fco = octx[ofile]
67 fca = self._repo.filectx(afile, fileid=anode)
67 fca = self._repo.filectx(afile, fileid=anode)
68 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
68 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
69 if not r:
69 if not r:
70 self.mark(dfile, 'r')
70 self.mark(dfile, 'r')
71 return r
71 return r
72
72
73 def _checkunknown(wctx, mctx):
73 def _checkunknown(wctx, mctx):
74 "check for collisions between unknown files and files in mctx"
74 "check for collisions between unknown files and files in mctx"
75 for f in wctx.unknown():
75 for f in wctx.unknown():
76 if f in mctx and mctx[f].cmp(wctx[f].data()):
76 if f in mctx and mctx[f].cmp(wctx[f]):
77 raise util.Abort(_("untracked file in working directory differs"
77 raise util.Abort(_("untracked file in working directory differs"
78 " from file in requested revision: '%s'") % f)
78 " from file in requested revision: '%s'") % f)
79
79
80 def _checkcollision(mctx):
80 def _checkcollision(mctx):
81 "check for case folding collisions in the destination context"
81 "check for case folding collisions in the destination context"
82 folded = {}
82 folded = {}
83 for fn in mctx:
83 for fn in mctx:
84 fold = fn.lower()
84 fold = fn.lower()
85 if fold in folded:
85 if fold in folded:
86 raise util.Abort(_("case-folding collision between %s and %s")
86 raise util.Abort(_("case-folding collision between %s and %s")
87 % (fn, folded[fold]))
87 % (fn, folded[fold]))
88 folded[fold] = fn
88 folded[fold] = fn
89
89
90 def _forgetremoved(wctx, mctx, branchmerge):
90 def _forgetremoved(wctx, mctx, branchmerge):
91 """
91 """
92 Forget removed files
92 Forget removed files
93
93
94 If we're jumping between revisions (as opposed to merging), and if
94 If we're jumping between revisions (as opposed to merging), and if
95 neither the working directory nor the target rev has the file,
95 neither the working directory nor the target rev has the file,
96 then we need to remove it from the dirstate, to prevent the
96 then we need to remove it from the dirstate, to prevent the
97 dirstate from listing the file when it is no longer in the
97 dirstate from listing the file when it is no longer in the
98 manifest.
98 manifest.
99
99
100 If we're merging, and the other revision has removed a file
100 If we're merging, and the other revision has removed a file
101 that is not present in the working directory, we need to mark it
101 that is not present in the working directory, we need to mark it
102 as removed.
102 as removed.
103 """
103 """
104
104
105 action = []
105 action = []
106 state = branchmerge and 'r' or 'f'
106 state = branchmerge and 'r' or 'f'
107 for f in wctx.deleted():
107 for f in wctx.deleted():
108 if f not in mctx:
108 if f not in mctx:
109 action.append((f, state))
109 action.append((f, state))
110
110
111 if not branchmerge:
111 if not branchmerge:
112 for f in wctx.removed():
112 for f in wctx.removed():
113 if f not in mctx:
113 if f not in mctx:
114 action.append((f, "f"))
114 action.append((f, "f"))
115
115
116 return action
116 return action
117
117
118 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
118 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
119 """
119 """
120 Merge p1 and p2 with ancestor ma and generate merge action list
120 Merge p1 and p2 with ancestor ma and generate merge action list
121
121
122 overwrite = whether we clobber working files
122 overwrite = whether we clobber working files
123 partial = function to filter file lists
123 partial = function to filter file lists
124 """
124 """
125
125
126 def fmerge(f, f2, fa):
126 def fmerge(f, f2, fa):
127 """merge flags"""
127 """merge flags"""
128 a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
128 a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
129 if m == n: # flags agree
129 if m == n: # flags agree
130 return m # unchanged
130 return m # unchanged
131 if m and n and not a: # flags set, don't agree, differ from parent
131 if m and n and not a: # flags set, don't agree, differ from parent
132 r = repo.ui.promptchoice(
132 r = repo.ui.promptchoice(
133 _(" conflicting flags for %s\n"
133 _(" conflicting flags for %s\n"
134 "(n)one, e(x)ec or sym(l)ink?") % f,
134 "(n)one, e(x)ec or sym(l)ink?") % f,
135 (_("&None"), _("E&xec"), _("Sym&link")), 0)
135 (_("&None"), _("E&xec"), _("Sym&link")), 0)
136 if r == 1:
136 if r == 1:
137 return "x" # Exec
137 return "x" # Exec
138 if r == 2:
138 if r == 2:
139 return "l" # Symlink
139 return "l" # Symlink
140 return ""
140 return ""
141 if m and m != a: # changed from a to m
141 if m and m != a: # changed from a to m
142 return m
142 return m
143 if n and n != a: # changed from a to n
143 if n and n != a: # changed from a to n
144 return n
144 return n
145 return '' # flag was cleared
145 return '' # flag was cleared
146
146
147 def act(msg, m, f, *args):
147 def act(msg, m, f, *args):
148 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
148 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
149 action.append((f, m) + args)
149 action.append((f, m) + args)
150
150
151 action, copy = [], {}
151 action, copy = [], {}
152
152
153 if overwrite:
153 if overwrite:
154 pa = p1
154 pa = p1
155 elif pa == p2: # backwards
155 elif pa == p2: # backwards
156 pa = p1.p1()
156 pa = p1.p1()
157 elif pa and repo.ui.configbool("merge", "followcopies", True):
157 elif pa and repo.ui.configbool("merge", "followcopies", True):
158 dirs = repo.ui.configbool("merge", "followdirs", True)
158 dirs = repo.ui.configbool("merge", "followdirs", True)
159 copy, diverge = copies.copies(repo, p1, p2, pa, dirs)
159 copy, diverge = copies.copies(repo, p1, p2, pa, dirs)
160 for of, fl in diverge.iteritems():
160 for of, fl in diverge.iteritems():
161 act("divergent renames", "dr", of, fl)
161 act("divergent renames", "dr", of, fl)
162
162
163 repo.ui.note(_("resolving manifests\n"))
163 repo.ui.note(_("resolving manifests\n"))
164 repo.ui.debug(" overwrite %s partial %s\n" % (overwrite, bool(partial)))
164 repo.ui.debug(" overwrite %s partial %s\n" % (overwrite, bool(partial)))
165 repo.ui.debug(" ancestor %s local %s remote %s\n" % (pa, p1, p2))
165 repo.ui.debug(" ancestor %s local %s remote %s\n" % (pa, p1, p2))
166
166
167 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
167 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
168 copied = set(copy.values())
168 copied = set(copy.values())
169
169
170 if '.hgsubstate' in m1:
170 if '.hgsubstate' in m1:
171 # check whether sub state is modified
171 # check whether sub state is modified
172 for s in p1.substate:
172 for s in p1.substate:
173 if p1.sub(s).dirty():
173 if p1.sub(s).dirty():
174 m1['.hgsubstate'] += "+"
174 m1['.hgsubstate'] += "+"
175 break
175 break
176
176
177 # Compare manifests
177 # Compare manifests
178 for f, n in m1.iteritems():
178 for f, n in m1.iteritems():
179 if partial and not partial(f):
179 if partial and not partial(f):
180 continue
180 continue
181 if f in m2:
181 if f in m2:
182 rflags = fmerge(f, f, f)
182 rflags = fmerge(f, f, f)
183 a = ma.get(f, nullid)
183 a = ma.get(f, nullid)
184 if n == m2[f] or m2[f] == a: # same or local newer
184 if n == m2[f] or m2[f] == a: # same or local newer
185 # is file locally modified or flags need changing?
185 # is file locally modified or flags need changing?
186 # dirstate flags may need to be made current
186 # dirstate flags may need to be made current
187 if m1.flags(f) != rflags or n[20:]:
187 if m1.flags(f) != rflags or n[20:]:
188 act("update permissions", "e", f, rflags)
188 act("update permissions", "e", f, rflags)
189 elif n == a: # remote newer
189 elif n == a: # remote newer
190 act("remote is newer", "g", f, rflags)
190 act("remote is newer", "g", f, rflags)
191 else: # both changed
191 else: # both changed
192 act("versions differ", "m", f, f, f, rflags, False)
192 act("versions differ", "m", f, f, f, rflags, False)
193 elif f in copied: # files we'll deal with on m2 side
193 elif f in copied: # files we'll deal with on m2 side
194 pass
194 pass
195 elif f in copy:
195 elif f in copy:
196 f2 = copy[f]
196 f2 = copy[f]
197 if f2 not in m2: # directory rename
197 if f2 not in m2: # directory rename
198 act("remote renamed directory to " + f2, "d",
198 act("remote renamed directory to " + f2, "d",
199 f, None, f2, m1.flags(f))
199 f, None, f2, m1.flags(f))
200 else: # case 2 A,B/B/B or case 4,21 A/B/B
200 else: # case 2 A,B/B/B or case 4,21 A/B/B
201 act("local copied/moved to " + f2, "m",
201 act("local copied/moved to " + f2, "m",
202 f, f2, f, fmerge(f, f2, f2), False)
202 f, f2, f, fmerge(f, f2, f2), False)
203 elif f in ma: # clean, a different, no remote
203 elif f in ma: # clean, a different, no remote
204 if n != ma[f]:
204 if n != ma[f]:
205 if repo.ui.promptchoice(
205 if repo.ui.promptchoice(
206 _(" local changed %s which remote deleted\n"
206 _(" local changed %s which remote deleted\n"
207 "use (c)hanged version or (d)elete?") % f,
207 "use (c)hanged version or (d)elete?") % f,
208 (_("&Changed"), _("&Delete")), 0):
208 (_("&Changed"), _("&Delete")), 0):
209 act("prompt delete", "r", f)
209 act("prompt delete", "r", f)
210 else:
210 else:
211 act("prompt keep", "a", f)
211 act("prompt keep", "a", f)
212 elif n[20:] == "a": # added, no remote
212 elif n[20:] == "a": # added, no remote
213 act("remote deleted", "f", f)
213 act("remote deleted", "f", f)
214 elif n[20:] != "u":
214 elif n[20:] != "u":
215 act("other deleted", "r", f)
215 act("other deleted", "r", f)
216
216
217 for f, n in m2.iteritems():
217 for f, n in m2.iteritems():
218 if partial and not partial(f):
218 if partial and not partial(f):
219 continue
219 continue
220 if f in m1 or f in copied: # files already visited
220 if f in m1 or f in copied: # files already visited
221 continue
221 continue
222 if f in copy:
222 if f in copy:
223 f2 = copy[f]
223 f2 = copy[f]
224 if f2 not in m1: # directory rename
224 if f2 not in m1: # directory rename
225 act("local renamed directory to " + f2, "d",
225 act("local renamed directory to " + f2, "d",
226 None, f, f2, m2.flags(f))
226 None, f, f2, m2.flags(f))
227 elif f2 in m2: # rename case 1, A/A,B/A
227 elif f2 in m2: # rename case 1, A/A,B/A
228 act("remote copied to " + f, "m",
228 act("remote copied to " + f, "m",
229 f2, f, f, fmerge(f2, f, f2), False)
229 f2, f, f, fmerge(f2, f, f2), False)
230 else: # case 3,20 A/B/A
230 else: # case 3,20 A/B/A
231 act("remote moved to " + f, "m",
231 act("remote moved to " + f, "m",
232 f2, f, f, fmerge(f2, f, f2), True)
232 f2, f, f, fmerge(f2, f, f2), True)
233 elif f not in ma:
233 elif f not in ma:
234 act("remote created", "g", f, m2.flags(f))
234 act("remote created", "g", f, m2.flags(f))
235 elif n != ma[f]:
235 elif n != ma[f]:
236 if repo.ui.promptchoice(
236 if repo.ui.promptchoice(
237 _("remote changed %s which local deleted\n"
237 _("remote changed %s which local deleted\n"
238 "use (c)hanged version or leave (d)eleted?") % f,
238 "use (c)hanged version or leave (d)eleted?") % f,
239 (_("&Changed"), _("&Deleted")), 0) == 0:
239 (_("&Changed"), _("&Deleted")), 0) == 0:
240 act("prompt recreating", "g", f, m2.flags(f))
240 act("prompt recreating", "g", f, m2.flags(f))
241
241
242 return action
242 return action
243
243
def actionkey(a):
    """Sort key for merge actions: remove ('r') actions sort first."""
    if a[1] == 'r':
        return -1, a
    return 0, a
246
246
def applyupdates(repo, action, wctx, mctx, actx):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy
    actx is the context of the common ancestor

    Returns a tuple of counts: (updated, merged, removed, unresolved).
    """

    updated, merged, removed, unresolved = 0, 0, 0, 0
    ms = mergestate(repo)
    ms.reset(wctx.parents()[0].node())
    moves = []
    # sort so that removes come first (see actionkey); frees target paths
    # before renames/merges write into them
    action.sort(key=actionkey)
    substate = wctx.substate # prime

    # prescan for merges: record every 'm' action in the merge state
    # before touching the working directory
    u = repo.ui
    for a in action:
        f, m = a[:2]
        if m == 'm': # merge
            f2, fd, flags, move = a[2:]
            if f == '.hgsubstate': # merged internally
                continue
            repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd))
            fcl = wctx[f]
            fco = mctx[f2]
            # fall back to the null filectx when no common file ancestor exists
            fca = fcl.ancestor(fco, actx) or repo.filectx(f, fileid=nullrev)
            ms.add(fcl, fco, fca, fd, flags)
            if f != fd and move:
                moves.append(f)

    # remove renamed files after safely stored
    for f in moves:
        if util.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            os.unlink(repo.wjoin(f))

    # refuse to operate on paths escaping the repository root
    audit_path = util.path_auditor(repo.root)

    numupdates = len(action)
    for i, a in enumerate(action):
        f, m = a[:2]
        u.progress('update', i + 1, item=f, total=numupdates, unit='files')
        if f and f[0] == "/":
            continue
        if m == "r": # remove
            repo.ui.note(_("removing %s\n") % f)
            audit_path(f)
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx)
            try:
                util.unlink(repo.wjoin(f))
            except OSError, inst:
                # a file already gone is fine; anything else is reported
                if inst.errno != errno.ENOENT:
                    repo.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
            removed += 1
        elif m == "m": # merge
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx))
                continue
            f2, fd, flags, move = a[2:]
            r = ms.resolve(fd, wctx, mctx)
            # r: None = no real conflict, 0 = merged cleanly, >0 = unresolved
            if r is not None and r > 0:
                unresolved += 1
            else:
                if r is None:
                    updated += 1
                else:
                    merged += 1
            util.set_flags(repo.wjoin(fd), 'l' in flags, 'x' in flags)
            if f != fd and move and util.lexists(repo.wjoin(f)):
                repo.ui.debug("removing %s\n" % f)
                os.unlink(repo.wjoin(f))
        elif m == "g": # get
            flags = a[2]
            repo.ui.note(_("getting %s\n") % f)
            t = mctx.filectx(f).data()
            repo.wwrite(f, t, flags)
            updated += 1
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx)
        elif m == "d": # directory rename
            f2, fd, flags = a[2:]
            # f: local source to move, f2: remote source to fetch; either
            # may be unset depending on which side renamed the directory
            if f:
                repo.ui.note(_("moving %s to %s\n") % (f, fd))
                t = wctx.filectx(f).data()
                repo.wwrite(fd, t, flags)
                util.unlink(repo.wjoin(f))
            if f2:
                repo.ui.note(_("getting %s to %s\n") % (f2, fd))
                t = mctx.filectx(f2).data()
                repo.wwrite(fd, t, flags)
            updated += 1
        elif m == "dr": # divergent renames
            fl = a[2]
            repo.ui.warn(_("warning: detected divergent renames of %s to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)
        elif m == "e": # exec
            flags = a[2]
            util.set_flags(repo.wjoin(f), 'l' in flags, 'x' in flags)
    u.progress('update', None, total=numupdates, unit='files')

    return updated, merged, removed, unresolved
352
352
def recordupdates(repo, action, branchmerge):
    """record merge actions to the dirstate

    action is the list produced by manifestmerge()/_forgetremoved() and
    already applied by applyupdates(); branchmerge selects between
    branch-merge bookkeeping (two parents) and plain-update bookkeeping.
    """

    for a in action:
        f, m = a[:2]
        if m == "r": # remove
            if branchmerge:
                repo.dirstate.remove(f)
            else:
                repo.dirstate.forget(f)
        elif m == "a": # re-add
            if not branchmerge:
                repo.dirstate.add(f)
        elif m == "f": # forget
            repo.dirstate.forget(f)
        elif m == "e": # exec change
            repo.dirstate.normallookup(f)
        elif m == "g": # get
            if branchmerge:
                repo.dirstate.otherparent(f)
            else:
                repo.dirstate.normal(f)
        elif m == "m": # merge
            f2, fd, flag, move = a[2:]
            if branchmerge:
                # We've done a branch merge, mark this file as merged
                # so that we properly record the merger later
                repo.dirstate.merge(fd)
                if f != f2: # copy/rename
                    if move:
                        repo.dirstate.remove(f)
                    if f != fd:
                        repo.dirstate.copy(f, fd)
                    else:
                        repo.dirstate.copy(f2, fd)
            else:
                # We've update-merged a locally modified file, so
                # we set the dirstate to emulate a normal checkout
                # of that file some time in the past. Thus our
                # merge will appear as a normal local file
                # modification.
                if f2 == fd: # file not locally copied/moved
                    repo.dirstate.normallookup(fd)
                if move:
                    repo.dirstate.forget(f)
        elif m == "d": # directory rename
            f2, fd, flag = a[2:]
            if not f2 and f not in repo.dirstate:
                # untracked file moved
                continue
            if branchmerge:
                repo.dirstate.add(fd)
                if f:
                    repo.dirstate.remove(f)
                    repo.dirstate.copy(f, fd)
                if f2:
                    repo.dirstate.copy(f2, fd)
            else:
                repo.dirstate.normal(fd)
                if f:
                    repo.dirstate.forget(f)
414
414
def update(repo, node, branchmerge, force, partial):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to, or None if unspecified
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)

    Returns the stats tuple from applyupdates():
    (updated, merged, removed, unresolved).

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear, on the same named
    branch, or on another named branch).

    This logic is tested by test-update-branches.

    -c  -C  dirty  rev  |  linear   same  cross
     n   n    n     n   |    ok     (1)     x
     n   n    n     y   |    ok     ok     ok
     n   n    y     *   |   merge   (2)    (2)
     n   y    *     *   |    ---  discard  ---
     y   n    y     *   |    ---    (3)    ---
     y   n    n     *   |    ---    ok     ---
     y   y    *     *   |    ---    (4)    ---

    x = can't happen
    * = don't-care
    1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
    2 = abort: crosses branches (use 'hg merge' to merge or
                 use 'hg update -C' to discard changes)
    3 = abort: uncommitted local changes
    4 = incompatible options (checked in commands.py)
    """

    # remember whether the caller gave an explicit rev; needed for the
    # "jump branches if clean and specific rev given" rule below
    onode = node
    wlock = repo.wlock()
    try:
        wc = repo[None]
        if node is None:
            # tip of current branch
            try:
                node = repo.branchtags()[wc.branch()]
            except KeyError:
                if wc.branch() == "default": # no default branch!
                    node = repo.lookup("tip") # update to tip
                else:
                    raise util.Abort(_("branch %s not found") % wc.branch())
        overwrite = force and not branchmerge
        pl = wc.parents()
        p1, p2 = pl[0], repo[node]
        pa = p1.ancestor(p2)
        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
        fastforward = False

        ### check phase
        if not overwrite and len(pl) > 1:
            raise util.Abort(_("outstanding uncommitted merges"))
        if branchmerge:
            if pa == p2:
                raise util.Abort(_("merging with a working directory ancestor"
                                   " has no effect"))
            elif pa == p1:
                if p1.branch() != p2.branch():
                    # merging a descendant on another named branch
                    fastforward = True
                else:
                    raise util.Abort(_("nothing to merge (use 'hg update'"
                                       " or check 'hg heads')"))
            if not force and (wc.files() or wc.deleted()):
                raise util.Abort(_("outstanding uncommitted changes "
                                   "(use 'hg status' to list changes)"))
        elif not overwrite:
            if pa == p1 or pa == p2: # linear
                pass # all good
            elif wc.files() or wc.deleted():
                raise util.Abort(_("crosses branches (use 'hg merge' to merge "
                                 "or use 'hg update -C' to discard changes)"))
            elif onode is None:
                raise util.Abort(_("crosses branches (use 'hg merge' or use "
                                   "'hg update -c')"))
            else:
                # Allow jumping branches if clean and specific rev given
                overwrite = True

        ### calculate phase
        action = []
        wc.status(unknown=True) # prime cache
        if not force:
            _checkunknown(wc, p2)
        if not util.checkcase(repo.path):
            # case-insensitive filesystem: guard against name collisions
            _checkcollision(p2)
        action += _forgetremoved(wc, p2, branchmerge)
        action += manifestmerge(repo, wc, p2, pa, overwrite, partial)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)

        stats = applyupdates(repo, action, wc, p2, pa)

        if not partial:
            repo.dirstate.setparents(fp1, fp2)
            recordupdates(repo, action, branchmerge)
            if not branchmerge and not fastforward:
                repo.dirstate.setbranch(p2.branch())
    finally:
        wlock.release()

    # the 'update' hook runs outside the wlock, unlike 'preupdate'
    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
@@ -1,572 +1,581 b''
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import re
8 import re
9 import parser, util, error, discovery
9 import parser, util, error, discovery
10 import match as _match
10 import match as _match
11 from i18n import _
11 from i18n import _
12
12
# Token table for the revset grammar, keyed by token type.  Each entry is
# (binding strength, prefix action, infix action[, suffix action]); an
# action is either None or a tuple naming the parse-tree node to build.
# NOTE(review): presumably consumed by the generic parser module imported
# above -- confirm against parser.py.
elements = {
    "(": (20, ("group", 1, ")"), ("func", 1, ")")),
    "-": (19, ("negate", 19), ("minus", 19)),
    "::": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, ("not", 10)),
    "!": (10, ("not", 10)),
    "and": (5, None, ("and", 5)),
    "&": (5, None, ("and", 5)),
    "or": (4, None, ("or", 4)),
    "|": (4, None, ("or", 4)),
    "+": (4, None, ("or", 4)),
    ",": (2, None, ("list", 2)),
    ")": (0, None, None),
    "symbol": (0, ("symbol",), None),
    "string": (0, ("string",), None),
    "end": (0, None, None),
}

# words treated as operators (never symbols) by tokenize()
keywords = set(['and', 'or', 'not'])
36
36
def tokenize(program):
    """Tokenize a revset expression string.

    Yields 3-tuples (token type, value, start position).  Token types are
    the operator literals themselves ('::', ':', '-', ...), 'string',
    'symbol', and a final 'end' marker.  value is the decoded text for
    strings and symbols, None otherwise.  Raises error.ParseError on
    unterminated strings or unrecognized characters.
    """
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c in "():,-|&+!": # handle simple operators
            yield (c, None, pos)
        elif c in '"\'': # handle quoted strings
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', program[s:pos].decode('string-escape'), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        elif c.isalnum() or c in '._' or ord(c) > 127: # gather up a symbol/keyword
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if not (d.isalnum() or d in "._" or ord(d) > 127):
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            else:
                yield ('symbol', sym, s)
            # back up one: the outer loop's pos += 1 re-consumes the
            # character that terminated the symbol
            pos -= 1
        else:
            raise error.ParseError(_("syntax error"), pos)
        pos += 1
    yield ('end', None, pos)
86
86
87 # helpers
87 # helpers
88
88
def getstring(x, err):
    """Return the text payload of a 'string' or 'symbol' parse node.

    Raises error.ParseError(err) for any other node.
    """
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
93
93
def getlist(x):
    """Flatten a (possibly nested) 'list' parse node into a Python list.

    'list' nodes nest on their first child, so items are collected in
    reverse and flipped at the end.
    """
    if not x:
        return []
    items = []
    while x[0] == 'list':
        items.append(x[2])
        x = x[1]
        if not x:
            break
    else:
        items.append(x)
    items.reverse()
    return items
100
100
def getargs(x, min, max, err):
    """Flatten x into an argument list of between min and max items.

    Raises error.ParseError(err) when the arity is out of range.
    NB: min/max deliberately shadow the builtins; kept for interface parity.
    """
    args = getlist(x)
    if not (min <= len(args) <= max):
        raise error.ParseError(err)
    return args
106
106
def getset(repo, subset, x):
    """Evaluate parse tree x against subset via the methods table."""
    if x:
        return methods[x[0]](repo, subset, *x[1:])
    raise error.ParseError(_("missing argument"))
111
111
112 # operator methods
112 # operator methods
113
113
def stringset(repo, subset, x):
    """Resolve revision identifier x and keep it if it is in subset.

    The null revision (-1) is only returned when the whole repo is the
    subset, since -1 is never a member of a normal rev range.
    """
    rev = repo[x].rev()
    if rev == -1 and len(subset) == len(repo):
        return [-1]
    if rev in subset:
        return [rev]
    return []
121
121
def symbolset(repo, subset, x):
    """Look up a bare symbol as a revision identifier.

    Known function names are rejected so they cannot masquerade as revs.
    """
    if x not in symbols:
        return stringset(repo, subset, x)
    raise error.ParseError(_("can't use %s here") % x)
126
126
def rangeset(repo, subset, x, y):
    """Evaluate a range expression x:y, ordered from x's first rev to
    y's last rev, restricted to subset."""
    def endpoint(tree):
        # an endpoint invisible inside subset is re-resolved against the
        # whole repository
        revs = getset(repo, subset, tree)
        if not revs:
            revs = getset(repo, range(len(repo)), tree)
        return revs

    m = endpoint(x)
    n = endpoint(y)
    if not m or not n:
        return []
    first, last = m[0], n[-1]

    if first < last:
        order = range(first, last + 1)
    else:
        # descending range when the endpoints are reversed
        order = range(first, last - 1, -1)
    keep = set(subset)
    return [r for r in order if r in keep]
146
146
def andset(repo, subset, x, y):
    """Intersection: evaluate y within the result of x."""
    left = getset(repo, subset, x)
    return getset(repo, left, y)
149
149
def orset(repo, subset, x, y):
    """Union, preserving subset order; y only sees revs x did not take."""
    found = set(getset(repo, subset, x))
    remaining = [r for r in subset if r not in found]
    found.update(getset(repo, remaining, y))
    return [r for r in subset if r in found]
154
154
def notset(repo, subset, x):
    """Complement of x within subset."""
    excluded = set(getset(repo, subset, x))
    return [r for r in subset if r not in excluded]
158
158
def listset(repo, subset, a, b):
    # bare comma-lists are only valid as function arguments, never as a
    # standalone expression
    raise error.ParseError(_("can't use a list in this context"))
161
161
def func(repo, subset, a, b):
    """Dispatch a function-call node: a is the name node, b the args."""
    if a[0] != 'symbol' or a[1] not in symbols:
        raise error.ParseError(_("not a function: %s") % a[1])
    return symbols[a[1]](repo, subset, b)
166
166
167 # functions
167 # functions
168
168
def p1(repo, subset, x):
    """First parents of all changesets in set x."""
    cl = repo.changelog
    wanted = set(cl.parentrevs(rev)[0] for rev in getset(repo, subset, x))
    return [r for r in subset if r in wanted]
175
175
def p2(repo, subset, x):
    """Second parents of all changesets in set x."""
    cl = repo.changelog
    wanted = set(cl.parentrevs(rev)[1] for rev in getset(repo, subset, x))
    return [r for r in subset if r in wanted]
182
182
def parents(repo, subset, x):
    """All parents of all changesets in set x."""
    cl = repo.changelog
    seen = set()
    for rev in getset(repo, subset, x):
        seen.update(cl.parentrevs(rev))
    return [r for r in subset if r in seen]
189
189
def maxrev(repo, subset, x):
    """Highest revision of set x, provided it also appears in subset."""
    revs = getset(repo, subset, x)
    if revs:
        highest = max(revs)
        if highest in subset:
            return [highest]
    return []
197
197
def minrev(repo, subset, x):
    """Lowest revision of set x, provided it also appears in subset."""
    revs = getset(repo, subset, x)
    if revs:
        lowest = min(revs)
        if lowest in subset:
            return [lowest]
    return []
205
def limit(repo, subset, x):
    """limit(set, n): the first n members of set."""
    args = getargs(x, 2, 2, _("limit wants two arguments"))
    try:
        count = int(getstring(args[1], _("limit wants a number")))
    except ValueError:
        raise error.ParseError(_("limit expects a number"))
    return getset(repo, subset, args[0])[:count]
205
213
def children(repo, subset, x):
    # Revisions that have at least one parent selected by x.
    parentset = set(getset(repo, subset, x))
    cl = repo.changelog
    kids = set()
    for rev in xrange(0, len(repo)):
        for p in cl.parentrevs(rev):
            if p in parentset:
                kids.add(rev)
                break
    return [rev for rev in subset if rev in kids]
215
223
def branch(repo, subset, x):
    # Members of x plus every revision on the same named branches.
    members = getset(repo, range(len(repo)), x)
    branchnames = set()
    for rev in members:
        branchnames.add(repo[rev].branch())
    memberset = set(members)
    return [rev for rev in subset
            if rev in memberset or repo[rev].branch() in branchnames]
223
231
def ancestor(repo, subset, x):
    # ancestor(rev, rev): greatest common ancestor of two single revisions.
    args = getargs(x, 2, 2, _("ancestor wants two arguments"))
    everything = range(len(repo))
    left = getset(repo, everything, args[0])
    right = getset(repo, everything, args[1])
    if len(left) != 1 or len(right) != 1:
        raise error.ParseError(_("ancestor arguments must be single revisions"))
    anc = repo[left[0]].ancestor(repo[right[0]]).rev()

    return [rev for rev in subset if rev == anc]
234
242
def ancestors(repo, subset, x):
    # Revisions selected by x plus all of their ancestors.
    seeds = getset(repo, range(len(repo)), x)
    if not seeds:
        return []
    closure = set(repo.changelog.ancestors(*seeds))
    closure.update(seeds)
    return [rev for rev in subset if rev in closure]
241
249
def descendants(repo, subset, x):
    # Revisions selected by x plus all of their descendants.
    seeds = getset(repo, range(len(repo)), x)
    if not seeds:
        return []
    closure = set(repo.changelog.descendants(*seeds))
    closure.update(seeds)
    return [rev for rev in subset if rev in closure]
248
256
def follow(repo, subset, x):
    # follow(): the working directory parent and its ancestors.
    getargs(x, 0, 0, _("follow takes no arguments"))
    wdparent = repo['.'].rev()
    lineage = set(repo.changelog.ancestors(wdparent))
    lineage.add(wdparent)
    return [rev for rev in subset if rev in lineage]
254
262
def date(repo, subset, x):
    # date(spec): changesets whose commit date matches the date spec.
    spec = getstring(x, _("date wants a string"))
    matcher = util.matchdate(spec)
    return [rev for rev in subset if matcher(repo[rev].date()[0])]
259
267
def keyword(repo, subset, x):
    # keyword(text): case-insensitive substring search over filenames,
    # user and description of each changeset.
    needle = getstring(x, _("keyword wants a string")).lower()
    matches = []
    for rev in subset:
        ctx = repo[rev]
        haystack = " ".join(ctx.files() + [ctx.user(), ctx.description()])
        if needle in haystack.lower():
            matches.append(rev)
    return matches
269
277
def grep(repo, subset, x):
    """grep(regex): changesets whose files, user or description match regex.

    Fix: the original used ``continue`` after appending, which only
    advanced the inner field loop — a revision matching in several
    fields was appended once per match, producing duplicates in the
    result.  ``break`` records each revision at most once.
    """
    gr = re.compile(getstring(x, _("grep wants a string")))
    l = []
    for r in subset:
        c = repo[r]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                l.append(r)
                break
    return l
280
288
def author(repo, subset, x):
    # author(text): case-insensitive substring match on the committer.
    needle = getstring(x, _("author wants a string")).lower()
    return [rev for rev in subset if needle in repo[rev].user().lower()]
284
292
def hasfile(repo, subset, x):
    """file(pattern): changesets affecting files matching pattern.

    Fix: the original used ``continue`` after appending, which only
    advanced the inner file loop — a revision touching several matching
    files was appended once per file.  ``break`` records each revision
    at most once.
    """
    pat = getstring(x, _("file wants a pattern"))
    m = _match.match(repo.root, repo.getcwd(), [pat])
    s = []
    for r in subset:
        for f in repo[r].files():
            if m(f):
                s.append(r)
                break
    return s
295
303
def contains(repo, subset, x):
    """contains(pattern): changesets whose manifest contains a matching file.

    Fix: in the slow (pattern) branch the original used ``continue``
    after appending, which only advanced the inner manifest loop — a
    revision with several matching files was appended once per file.
    ``break`` records each revision at most once.
    """
    pat = getstring(x, _("contains wants a pattern"))
    m = _match.match(repo.root, repo.getcwd(), [pat])
    s = []
    if m.files() == [pat]:
        # Literal filename: cheap direct manifest membership test.
        for r in subset:
            if pat in repo[r]:
                s.append(r)
    else:
        # Real pattern: walk every file in each manifest.
        for r in subset:
            for f in repo[r].manifest():
                if m(f):
                    s.append(r)
                    break
    return s
312
320
def checkstatus(repo, subset, pat, field):
    """Shared helper for modifies()/adds()/removes().

    field indexes the repo.status() tuple: 0 = modified, 1 = added,
    2 = removed.  Returns the members of subset whose status list for
    that field contains a file matching pat.

    Fix: in the slow (pattern) path the original used ``continue``
    after appending, which only advanced the inner file loop — a
    revision with several matching files was appended once per file.
    ``break`` records each revision at most once.
    """
    m = _match.match(repo.root, repo.getcwd(), [pat])
    s = []
    fast = (m.files() == [pat])  # literal filename, no pattern matching
    for r in subset:
        c = repo[r]
        if fast:
            # Cheap pre-filter: skip changesets not touching the file.
            if pat not in c.files():
                continue
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                continue
        files = repo.status(c.p1().node(), c.node())[field]
        if fast:
            if pat in files:
                s.append(r)
        else:
            for f in files:
                if m(f):
                    s.append(r)
                    break
    return s
339
347
def modifies(repo, subset, x):
    # modifies(pattern): changesets modifying a file matching pattern.
    pattern = getstring(x, _("modifies wants a pattern"))
    return checkstatus(repo, subset, pattern, 0)
343
351
def adds(repo, subset, x):
    # adds(pattern): changesets adding a file matching pattern.
    pattern = getstring(x, _("adds wants a pattern"))
    return checkstatus(repo, subset, pattern, 1)
347
355
def removes(repo, subset, x):
    # removes(pattern): changesets removing a file matching pattern.
    pattern = getstring(x, _("removes wants a pattern"))
    return checkstatus(repo, subset, pattern, 2)
351
359
def merge(repo, subset, x):
    # merge(): changesets with two parents.
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    return [r for r in subset if parentrevs(r)[1] != -1]
356
364
def closed(repo, subset, x):
    # closed(): changesets whose branch has been closed.
    getargs(x, 0, 0, _("closed takes no arguments"))
    result = []
    for r in subset:
        if repo[r].extra().get('close'):
            result.append(r)
    return result
360
368
def head(repo, subset, x):
    # head(): changesets that are a head of some named branch.
    getargs(x, 0, 0, _("head takes no arguments"))
    headrevs = set()
    for branchname, nodes in repo.branchmap().iteritems():
        for node in nodes:
            headrevs.add(repo[node].rev())
    return [r for r in subset if r in headrevs]
367
375
def reverse(repo, subset, x):
    # reverse(set): same members, opposite order.
    return getset(repo, subset, x)[::-1]
372
380
def sort(repo, subset, x):
    # sort(set[, keys]): sort set by a whitespace-separated list of keys.
    # Supported keys: rev, branch, desc, user/author, date; a leading
    # '-' on a key reverses its order.  Default key is "rev".
    l = getargs(x, 1, 2, _("sort wants one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # Map a string to one that compares in the opposite order, so a
        # descending string key can ride an ascending tuple sort.
        return "".join(chr(255 - ord(c)) for c in s)
    for r in getset(repo, subset, s):
        c = repo[r]
        # Build one compound sort key per revision; keys are compared
        # element by element in the order given.
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # Keep the revision itself as the final tuple element so it can
        # be recovered after sorting (and acts as a stable tiebreaker).
        e.append(r)
        l.append(e)
    l.sort()
    return [e[-1] for e in l]
414
422
def getall(repo, subset, x):
    # all(): every revision already in the subset.
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset
418
426
def heads(repo, subset, x):
    # heads(set): members of set with no children in set.
    candidates = getset(repo, subset, x)
    parentset = set(parents(repo, subset, x))
    return [rev for rev in candidates if rev not in parentset]
423
431
def roots(repo, subset, x):
    # roots(set): members of set with no parents in set.
    candidates = getset(repo, subset, x)
    childset = set(children(repo, subset, x))
    return [rev for rev in candidates if rev not in childset]
428
436
def outgoing(repo, subset, x):
    """outgoing([path]): changesets not found in the destination repository.

    Fix: the optional path argument was read with ``l[1:] or ''``, which
    is always '' for a 0/1-element argument list, so an explicitly
    given path was silently ignored and the default path always used.
    Read the first argument via getstring instead.
    """
    import hg # avoid start-up nasties
    l = getargs(x, 0, 1, _("outgoing wants a repository path"))
    # Argument, if given, is the parse node l[0], not l[1:].
    dest = l and getstring(l[0], _("outgoing wants a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    other = hg.repository(hg.remoteui(repo, {}), dest)
    repo.ui.pushbuffer()  # discard discovery progress output
    o = discovery.findoutgoing(repo, other)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in repo.changelog.nodesbetween(o, None)[0]])
    return [r for r in subset if r in o]
442
450
def tagged(repo, subset, x):
    # tagged(): changesets carrying any tag other than 'tip'.
    getargs(x, 0, 0, _("tagged takes no arguments"))
    cl = repo.changelog
    tagrevs = set(cl.rev(node) for tag, node in repo.tagslist()
                  if tag != 'tip')
    return [r for r in subset if r in tagrevs]
448
456
# Registry mapping revset function names to their implementations;
# consulted by func() when evaluating a parsed ('func', ...) node.
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "author": author,
    "branch": branch,
    "children": children,
    "closed": closed,
    "contains": contains,
    "date": date,
    "descendants": descendants,
    "file": hasfile,
    "follow": follow,
    "grep": grep,
    "head": head,
    "heads": heads,
    "keyword": keyword,
    "limit": limit,
    "max": maxrev,
    "min": minrev,
    "merge": merge,
    "modifies": modifies,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "removes": removes,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "tagged": tagged,
    "user": author,  # alias for author()
}
482
491
# Dispatch table mapping parse-tree node types to evaluator functions;
# used by getset() to interpret a parsed revset tree.
methods = {
    "range": rangeset,
    "string": stringset,
    "symbol": symbolset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "func": func,
}
493
502
def optimize(x, small):
    """Rewrite a parsed revset tree for cheaper evaluation.

    Returns (weight, tree): weight estimates how expensive the
    subexpression is; 'and' operands are reordered so the cheaper side
    is evaluated first.  'small' hints that the subexpression is
    expected to select few revisions, earning a weight bonus.

    Fixes: compare against None with ``is`` rather than ``==``; the
    reverse/limit weight test used ``f == "reverse limit"`` which can
    never be true (no function has that name), so those functions never
    received their zero weight — use the substring-membership idiom the
    rest of this function already relies on.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # a - b  ==>  a and not b
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'dagrange':
        # a::b  ==>  descendants(a) and ancestors(b)
        return optimize(('and', ('func', ('symbol', 'descendants'), x[1]),
                         ('func', ('symbol', 'ancestors'), x[2])), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and' or op == 'dagrange':
        # NOTE: 'dagrange' is already rewritten above, so only 'and'
        # reaches here; evaluate the lighter operand first.
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)
        w = min(wa, wb)
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        wa, ta = optimize(x[1], False)
        wb, tb = optimize(x[2], False)
        if wb < wa:
            wb, wa = wa, wb
        return max(wa, wb), (op, ta, tb)
    elif op == 'not':
        o = optimize(x[1], not small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'range list':
        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        if f in "grep date user author keyword branch file":
            w = 10 # slow
        elif f in "modifies adds removes outgoing":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
562
571
# Build the revset parser from the tokenizer and the operator table.
parse = parser.parser(tokenize, elements).parse
564
573
def match(spec):
    # Compile a revset spec into a callable(repo, subset) -> revisions.
    if not spec:
        raise error.ParseError(_("empty query"))
    weight, tree = optimize(parse(spec), True)
    def mfunc(repo, subset):
        return getset(repo, subset, tree)
    return mfunc
@@ -1,103 +1,106 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 echo "[extensions]" >> $HGRCPATH
3 echo "[extensions]" >> $HGRCPATH
4 echo "bookmarks=" >> $HGRCPATH
4 echo "bookmarks=" >> $HGRCPATH
5
5
6 hg init
6 hg init
7
7
8 echo % no bookmarks
8 echo % no bookmarks
9 hg bookmarks
9 hg bookmarks
10
10
11 echo % bookmark rev -1
11 echo % bookmark rev -1
12 hg bookmark X
12 hg bookmark X
13
13
14 echo % list bookmarks
14 echo % list bookmarks
15 hg bookmarks
15 hg bookmarks
16
16
17 echo % list bookmarks with color
17 echo % list bookmarks with color
18 hg --config extensions.color= --config color.mode=ansi \
18 hg --config extensions.color= --config color.mode=ansi \
19 bookmarks --color=always
19 bookmarks --color=always
20
20
21 echo a > a
21 echo a > a
22 hg add a
22 hg add a
23 hg commit -m 0
23 hg commit -m 0
24
24
25 echo % bookmark X moved to rev 0
25 echo % bookmark X moved to rev 0
26 hg bookmarks
26 hg bookmarks
27
27
28 echo % look up bookmark
28 echo % look up bookmark
29 hg log -r X
29 hg log -r X
30
30
31 echo % second bookmark for rev 0
31 echo % second bookmark for rev 0
32 hg bookmark X2
32 hg bookmark X2
33
33
34 echo % bookmark rev -1 again
34 echo % bookmark rev -1 again
35 hg bookmark -r null Y
35 hg bookmark -r null Y
36
36
37 echo % list bookmarks
37 echo % list bookmarks
38 hg bookmarks
38 hg bookmarks
39
39
40 echo b > b
40 echo b > b
41 hg add b
41 hg add b
42 hg commit -m 1
42 hg commit -m 1
43
43
44 echo % bookmarks X and X2 moved to rev 1, Y at rev -1
44 echo % bookmarks X and X2 moved to rev 1, Y at rev -1
45 hg bookmarks
45 hg bookmarks
46
46
47 echo % bookmark rev 0 again
47 echo % bookmark rev 0 again
48 hg bookmark -r 0 Z
48 hg bookmark -r 0 Z
49
49
50 echo c > c
50 echo c > c
51 hg add c
51 hg add c
52 hg commit -m 2
52 hg commit -m 2
53
53
54 echo % bookmarks X and X2 moved to rev 2, Y at rev -1, Z at rev 0
54 echo % bookmarks X and X2 moved to rev 2, Y at rev -1, Z at rev 0
55 hg bookmarks
55 hg bookmarks
56
56
57 echo % rename nonexistent bookmark
57 echo % rename nonexistent bookmark
58 hg bookmark -m A B
58 hg bookmark -m A B
59
59
60 echo % rename to existent bookmark
60 echo % rename to existent bookmark
61 hg bookmark -m X Y
61 hg bookmark -m X Y
62
62
63 echo % force rename to existent bookmark
63 echo % force rename to existent bookmark
64 hg bookmark -f -m X Y
64 hg bookmark -f -m X Y
65
65
66 echo % list bookmarks
66 echo % list bookmarks
67 hg bookmark
67 hg bookmark
68
68
69 echo % rename without new name
69 echo % rename without new name
70 hg bookmark -m Y
70 hg bookmark -m Y
71
71
72 echo % delete without name
72 echo % delete without name
73 hg bookmark -d
73 hg bookmark -d
74
74
75 echo % delete nonexistent bookmark
75 echo % delete nonexistent bookmark
76 hg bookmark -d A
76 hg bookmark -d A
77
77
78 echo % bookmark name with spaces should be stripped
78 echo % bookmark name with spaces should be stripped
79 hg bookmark ' x y '
79 hg bookmark ' x y '
80
80
81 echo % list bookmarks
81 echo % list bookmarks
82 hg bookmarks
82 hg bookmarks
83
83
84 echo % look up stripped bookmark name
84 echo % look up stripped bookmark name
85 hg log -r '"x y"'
85 hg log -r '"x y"'
86
86
87 echo % reject bookmark name with newline
87 echo % reject bookmark name with newline
88 hg bookmark '
88 hg bookmark '
89 '
89 '
90
90
91 echo % bookmark with existing name
91 echo % bookmark with existing name
92 hg bookmark Z
92 hg bookmark Z
93
93
94 echo % force bookmark with existing name
94 echo % force bookmark with existing name
95 hg bookmark -f Z
95 hg bookmark -f Z
96
96
97 echo % list bookmarks
97 echo % list bookmarks
98 hg bookmark
98 hg bookmark
99
99
100 echo % revision but no bookmark name
100 echo % revision but no bookmark name
101 hg bookmark -r .
101 hg bookmark -r .
102
102
103 echo % bookmark name with whitespace only
104 hg bookmark ' '
105
103 true
106 true
@@ -1,76 +1,78 b''
1 % no bookmarks
1 % no bookmarks
2 no bookmarks set
2 no bookmarks set
3 % bookmark rev -1
3 % bookmark rev -1
4 % list bookmarks
4 % list bookmarks
5 * X -1:000000000000
5 * X -1:000000000000
6 % list bookmarks with color
6 % list bookmarks with color
7  * X -1:000000000000
7  * X -1:000000000000
8 % bookmark X moved to rev 0
8 % bookmark X moved to rev 0
9 * X 0:f7b1eb17ad24
9 * X 0:f7b1eb17ad24
10 % look up bookmark
10 % look up bookmark
11 changeset: 0:f7b1eb17ad24
11 changeset: 0:f7b1eb17ad24
12 tag: X
12 tag: X
13 tag: tip
13 tag: tip
14 user: test
14 user: test
15 date: Thu Jan 01 00:00:00 1970 +0000
15 date: Thu Jan 01 00:00:00 1970 +0000
16 summary: 0
16 summary: 0
17
17
18 % second bookmark for rev 0
18 % second bookmark for rev 0
19 % bookmark rev -1 again
19 % bookmark rev -1 again
20 % list bookmarks
20 % list bookmarks
21 * X2 0:f7b1eb17ad24
21 * X2 0:f7b1eb17ad24
22 * X 0:f7b1eb17ad24
22 * X 0:f7b1eb17ad24
23 Y -1:000000000000
23 Y -1:000000000000
24 % bookmarks X and X2 moved to rev 1, Y at rev -1
24 % bookmarks X and X2 moved to rev 1, Y at rev -1
25 * X2 1:925d80f479bb
25 * X2 1:925d80f479bb
26 * X 1:925d80f479bb
26 * X 1:925d80f479bb
27 Y -1:000000000000
27 Y -1:000000000000
28 % bookmark rev 0 again
28 % bookmark rev 0 again
29 % bookmarks X and X2 moved to rev 2, Y at rev -1, Z at rev 0
29 % bookmarks X and X2 moved to rev 2, Y at rev -1, Z at rev 0
30 * X2 2:0316ce92851d
30 * X2 2:0316ce92851d
31 * X 2:0316ce92851d
31 * X 2:0316ce92851d
32 Z 0:f7b1eb17ad24
32 Z 0:f7b1eb17ad24
33 Y -1:000000000000
33 Y -1:000000000000
34 % rename nonexistent bookmark
34 % rename nonexistent bookmark
35 abort: a bookmark of this name does not exist
35 abort: a bookmark of this name does not exist
36 % rename to existent bookmark
36 % rename to existent bookmark
37 abort: a bookmark of the same name already exists
37 abort: a bookmark of the same name already exists
38 % force rename to existent bookmark
38 % force rename to existent bookmark
39 % list bookmarks
39 % list bookmarks
40 * X2 2:0316ce92851d
40 * X2 2:0316ce92851d
41 * Y 2:0316ce92851d
41 * Y 2:0316ce92851d
42 Z 0:f7b1eb17ad24
42 Z 0:f7b1eb17ad24
43 % rename without new name
43 % rename without new name
44 abort: new bookmark name required
44 abort: new bookmark name required
45 % delete without name
45 % delete without name
46 abort: bookmark name required
46 abort: bookmark name required
47 % delete nonexistent bookmark
47 % delete nonexistent bookmark
48 abort: a bookmark of this name does not exist
48 abort: a bookmark of this name does not exist
49 % bookmark name with spaces should be stripped
49 % bookmark name with spaces should be stripped
50 % list bookmarks
50 % list bookmarks
51 * X2 2:0316ce92851d
51 * X2 2:0316ce92851d
52 * Y 2:0316ce92851d
52 * Y 2:0316ce92851d
53 Z 0:f7b1eb17ad24
53 Z 0:f7b1eb17ad24
54 * x y 2:0316ce92851d
54 * x y 2:0316ce92851d
55 % look up stripped bookmark name
55 % look up stripped bookmark name
56 changeset: 2:0316ce92851d
56 changeset: 2:0316ce92851d
57 tag: X2
57 tag: X2
58 tag: Y
58 tag: Y
59 tag: tip
59 tag: tip
60 tag: x y
60 tag: x y
61 user: test
61 user: test
62 date: Thu Jan 01 00:00:00 1970 +0000
62 date: Thu Jan 01 00:00:00 1970 +0000
63 summary: 2
63 summary: 2
64
64
65 % reject bookmark name with newline
65 % reject bookmark name with newline
66 abort: bookmark name cannot contain newlines
66 abort: bookmark name cannot contain newlines
67 % bookmark with existing name
67 % bookmark with existing name
68 abort: a bookmark of the same name already exists
68 abort: a bookmark of the same name already exists
69 % force bookmark with existing name
69 % force bookmark with existing name
70 % list bookmarks
70 % list bookmarks
71 * X2 2:0316ce92851d
71 * X2 2:0316ce92851d
72 * Y 2:0316ce92851d
72 * Y 2:0316ce92851d
73 * Z 2:0316ce92851d
73 * Z 2:0316ce92851d
74 * x y 2:0316ce92851d
74 * x y 2:0316ce92851d
75 % revision but no bookmark name
75 % revision but no bookmark name
76 abort: bookmark name required
76 abort: bookmark name required
77 % bookmark name with whitespace only
78 abort: bookmark names cannot consist entirely of whitespace
@@ -1,111 +1,127 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 cat > writelines.py <<EOF
3 cat > writelines.py <<EOF
4 import sys
4 import sys
5 path = sys.argv[1]
5 path = sys.argv[1]
6 args = sys.argv[2:]
6 args = sys.argv[2:]
7 assert (len(args) % 2) == 0
7 assert (len(args) % 2) == 0
8
8
9 f = file(path, 'wb')
9 f = file(path, 'wb')
10 for i in xrange(len(args)/2):
10 for i in xrange(len(args)/2):
11 count, s = args[2*i:2*i+2]
11 count, s = args[2*i:2*i+2]
12 count = int(count)
12 count = int(count)
13 s = s.decode('string_escape')
13 s = s.decode('string_escape')
14 f.write(s*count)
14 f.write(s*count)
15 f.close()
15 f.close()
16
16
17 EOF
17 EOF
18
18
19 echo "[extensions]" >> $HGRCPATH
19 echo "[extensions]" >> $HGRCPATH
20 echo "mq=" >> $HGRCPATH
20 echo "mq=" >> $HGRCPATH
21 echo "[diff]" >> $HGRCPATH
21 echo "[diff]" >> $HGRCPATH
22 echo "git=1" >> $HGRCPATH
22 echo "git=1" >> $HGRCPATH
23
23
24 hg init repo
24 hg init repo
25 cd repo
25 cd repo
26
26
27 echo % qimport non-existing-file
27 echo % qimport non-existing-file
28 hg qimport non-existing-file
28 hg qimport non-existing-file
29
29
30 echo % import email
30 echo % import email
31 hg qimport --push -n email - <<EOF
31 hg qimport --push -n email - <<EOF
32 From: Username in email <test@example.net>
32 From: Username in email <test@example.net>
33 Subject: [PATCH] Message in email
33 Subject: [PATCH] Message in email
34 Date: Fri, 02 Jan 1970 00:00:00 +0000
34 Date: Fri, 02 Jan 1970 00:00:00 +0000
35
35
36 Text before patch.
36 Text before patch.
37
37
38 # HG changeset patch
38 # HG changeset patch
39 # User Username in patch <test@example.net>
39 # User Username in patch <test@example.net>
40 # Date 0 0
40 # Date 0 0
41 # Node ID 1a706973a7d84cb549823634a821d9bdf21c6220
41 # Node ID 1a706973a7d84cb549823634a821d9bdf21c6220
42 # Parent 0000000000000000000000000000000000000000
42 # Parent 0000000000000000000000000000000000000000
43 First line of commit message.
43 First line of commit message.
44
44
45 More text in commit message.
45 More text in commit message.
46 --- confuse the diff detection
46 --- confuse the diff detection
47
47
48 diff --git a/x b/x
48 diff --git a/x b/x
49 new file mode 100644
49 new file mode 100644
50 --- /dev/null
50 --- /dev/null
51 +++ b/x
51 +++ b/x
52 @@ -0,0 +1,1 @@
52 @@ -0,0 +1,1 @@
53 +new file
53 +new file
54 Text after patch.
54 Text after patch.
55
55
56 EOF
56 EOF
57
57
58 echo % hg tip -v
58 echo % hg tip -v
59 hg tip -v
59 hg tip -v
60 hg qpop
60 hg qpop
61 hg qdelete email
61 hg qdelete email
62
62
63 echo % import URL
63 echo % import URL
64 echo foo >> foo
64 echo foo >> foo
65 hg add foo
65 hg add foo
66 hg diff > $HGTMP/url.diff
66 hg diff > $HGTMP/url.diff
67 hg revert --no-backup foo
67 hg revert --no-backup foo
68 rm foo
68 rm foo
69 # Under unix: file:///foobar/blah
69 # Under unix: file:///foobar/blah
70 # Under windows: file:///c:/foobar/blah
70 # Under windows: file:///c:/foobar/blah
71 patchurl=`echo "$HGTMP"/url.diff | tr '\\\\' /`
71 patchurl=`echo "$HGTMP"/url.diff | tr '\\\\' /`
72 expr "$patchurl" : "\/" > /dev/null
72 expr "$patchurl" : "\/" > /dev/null
73 if [ $? -ne 0 ]; then
73 if [ $? -ne 0 ]; then
74 patchurl="/$patchurl"
74 patchurl="/$patchurl"
75 fi
75 fi
76 hg qimport file://"$patchurl"
76 hg qimport file://"$patchurl"
77 hg qun
77 hg qun
78
78
79 echo % import patch that already exists
79 echo % import patch that already exists
80 echo foo2 >> foo
80 echo foo2 >> foo
81 hg add foo
81 hg add foo
82 hg diff > ../url.diff
82 hg diff > ../url.diff
83 hg revert --no-backup foo
83 hg revert --no-backup foo
84 rm foo
84 rm foo
85 hg qimport ../url.diff
85 hg qimport ../url.diff
86 hg qpush
86 hg qpush
87 cat foo
87 cat foo
88 hg qpop
88 hg qpop
89 echo % qimport -f
89 echo % qimport -f
90 hg qimport -f ../url.diff
90 hg qimport -f ../url.diff
91 hg qpush
91 hg qpush
92 cat foo
92 cat foo
93 hg qpop
93 hg qpop
94
94
95 echo % build diff with CRLF
95 echo % build diff with CRLF
96 python ../writelines.py b 5 'a\n' 5 'a\r\n'
96 python ../writelines.py b 5 'a\n' 5 'a\r\n'
97 hg ci -Am addb
97 hg ci -Am addb
98 python ../writelines.py b 2 'a\n' 10 'b\n' 2 'a\r\n'
98 python ../writelines.py b 2 'a\n' 10 'b\n' 2 'a\r\n'
99 hg diff > b.diff
99 hg diff > b.diff
100 hg up -C
100 hg up -C
101 echo % qimport CRLF diff
101 echo % qimport CRLF diff
102 hg qimport b.diff
102 hg qimport b.diff
103 hg qpush
103 hg qpush
104
104
105 echo % try to import --push
105 echo % try to import --push
106 echo another >> b
106 echo another >> b
107 hg diff > another.diff
107 hg diff > another.diff
108 hg up -C
108 hg up -C
109 hg qimport --push another.diff
109 hg qimport --push another.diff
110 hg qfin -a
110 hg qfin -a
111 hg qimport -rtip -P
111 hg qimport -rtip -P
112
113 hg qpop -a
114 hg qdel -k 2.diff
115 echo % qimport -e
116 hg qimport -e 2.diff
117 hg qdel -k 2.diff
118 echo % qimport -e --name newname oldexisitingpatch
119 hg qimport -e --name this-name-is-better 2.diff
120 hg qser
121 echo % qimport -e --name without --force
122 cp .hg/patches/this-name-is-better .hg/patches/3.diff
123 hg qimport -e --name this-name-is-better 3.diff
124 hg qser
125 echo % qimport -e --name with --force
126 hg qimport --force -e --name this-name-is-better 3.diff
127 hg qser
@@ -1,54 +1,72 b''
1 % qimport non-existing-file
1 % qimport non-existing-file
2 abort: unable to read non-existing-file
2 abort: unable to read non-existing-file
3 % import email
3 % import email
4 adding email to series file
4 adding email to series file
5 applying email
5 applying email
6 now at: email
6 now at: email
7 % hg tip -v
7 % hg tip -v
8 changeset: 0:1a706973a7d8
8 changeset: 0:1a706973a7d8
9 tag: email
9 tag: email
10 tag: qbase
10 tag: qbase
11 tag: qtip
11 tag: qtip
12 tag: tip
12 tag: tip
13 user: Username in patch <test@example.net>
13 user: Username in patch <test@example.net>
14 date: Thu Jan 01 00:00:00 1970 +0000
14 date: Thu Jan 01 00:00:00 1970 +0000
15 files: x
15 files: x
16 description:
16 description:
17 First line of commit message.
17 First line of commit message.
18
18
19 More text in commit message.
19 More text in commit message.
20
20
21
21
22 popping email
22 popping email
23 patch queue now empty
23 patch queue now empty
24 % import URL
24 % import URL
25 adding url.diff to series file
25 adding url.diff to series file
26 url.diff
26 url.diff
27 % import patch that already exists
27 % import patch that already exists
28 abort: patch "url.diff" already exists
28 abort: patch "url.diff" already exists
29 applying url.diff
29 applying url.diff
30 now at: url.diff
30 now at: url.diff
31 foo
31 foo
32 popping url.diff
32 popping url.diff
33 patch queue now empty
33 patch queue now empty
34 % qimport -f
34 % qimport -f
35 adding url.diff to series file
35 adding url.diff to series file
36 applying url.diff
36 applying url.diff
37 now at: url.diff
37 now at: url.diff
38 foo2
38 foo2
39 popping url.diff
39 popping url.diff
40 patch queue now empty
40 patch queue now empty
41 % build diff with CRLF
41 % build diff with CRLF
42 adding b
42 adding b
43 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
43 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
44 % qimport CRLF diff
44 % qimport CRLF diff
45 adding b.diff to series file
45 adding b.diff to series file
46 applying b.diff
46 applying b.diff
47 now at: b.diff
47 now at: b.diff
48 % try to import --push
48 % try to import --push
49 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
50 adding another.diff to series file
50 adding another.diff to series file
51 applying another.diff
51 applying another.diff
52 now at: another.diff
52 now at: another.diff
53 patch b.diff finalized without changeset message
53 patch b.diff finalized without changeset message
54 patch another.diff finalized without changeset message
54 patch another.diff finalized without changeset message
55 popping 2.diff
56 patch queue now empty
57 % qimport -e
58 adding 2.diff to series file
59 % qimport -e --name newname oldexisitingpatch
60 renaming 2.diff to this-name-is-better
61 adding this-name-is-better to series file
62 this-name-is-better
63 url.diff
64 % qimport -e --name without --force
65 abort: patch "this-name-is-better" already exists
66 this-name-is-better
67 url.diff
68 % qimport -e --name with --force
69 renaming 3.diff to this-name-is-better
70 adding this-name-is-better to series file
71 this-name-is-better
72 url.diff
@@ -1,137 +1,138 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 HGENCODING=utf-8
3 HGENCODING=utf-8
4 export HGENCODING
4 export HGENCODING
5
5
6 try() {
6 try() {
7 echo '% hg debugrevspec' $@
7 echo '% hg debugrevspec' $@
8 hg debugrevspec --debug $@
8 hg debugrevspec --debug $@
9 }
9 }
10
10
11 log() {
11 log() {
12 echo "% log '$1'"
12 echo "% log '$1'"
13 hg log --template '{rev}\n' -r "$1"
13 hg log --template '{rev}\n' -r "$1"
14 }
14 }
15
15
16 hg init repo
16 hg init repo
17 cd repo
17 cd repo
18
18
19 echo a > a
19 echo a > a
20 hg branch a
20 hg branch a
21 hg ci -Aqm0
21 hg ci -Aqm0
22
22
23 echo b > b
23 echo b > b
24 hg branch b
24 hg branch b
25 hg ci -Aqm1
25 hg ci -Aqm1
26
26
27 rm a
27 rm a
28 hg branch a-b-c-
28 hg branch a-b-c-
29 hg ci -Aqm2 -u Bob
29 hg ci -Aqm2 -u Bob
30
30
31 hg co 1
31 hg co 1
32 hg branch +a+b+c+
32 hg branch +a+b+c+
33 hg ci -Aqm3
33 hg ci -Aqm3
34
34
35 hg co 2 # interleave
35 hg co 2 # interleave
36 echo bb > b
36 echo bb > b
37 hg branch -- -a-b-c-
37 hg branch -- -a-b-c-
38 hg ci -Aqm4 -d "May 12 2005"
38 hg ci -Aqm4 -d "May 12 2005"
39
39
40 hg co 3
40 hg co 3
41 hg branch /a/b/c/
41 hg branch /a/b/c/
42 hg ci -Aqm"5 bug"
42 hg ci -Aqm"5 bug"
43
43
44 hg merge 4
44 hg merge 4
45 hg branch _a_b_c_
45 hg branch _a_b_c_
46 hg ci -Aqm"6 issue619"
46 hg ci -Aqm"6 issue619"
47
47
48 hg branch .a.b.c.
48 hg branch .a.b.c.
49 hg ci -Aqm7
49 hg ci -Aqm7
50
50
51 hg branch all
51 hg branch all
52 hg ci --close-branch -Aqm8
52 hg ci --close-branch -Aqm8
53
53
54 hg co 4
54 hg co 4
55 hg branch Γ©
55 hg branch Γ©
56 hg ci -Aqm9
56 hg ci -Aqm9
57
57
58 hg tag -r6 1.0
58 hg tag -r6 1.0
59
59
60 # names that should work without quoting
60 # names that should work without quoting
61 try a
61 try a
62 try b-a
62 try b-a
63 try _a_b_c_
63 try _a_b_c_
64 try _a_b_c_-a
64 try _a_b_c_-a
65 try .a.b.c.
65 try .a.b.c.
66 try .a.b.c.-a
66 try .a.b.c.-a
67 try -- '-a-b-c-' # complains
67 try -- '-a-b-c-' # complains
68 log -a-b-c- # succeeds with fallback
68 log -a-b-c- # succeeds with fallback
69 try -- -a-b-c--a # complains
69 try -- -a-b-c--a # complains
70 try Γ©
70 try Γ©
71
71
72 # quoting needed
72 # quoting needed
73 try '"-a-b-c-"-a'
73 try '"-a-b-c-"-a'
74
74
75 log '1 or 2'
75 log '1 or 2'
76 log '1|2'
76 log '1|2'
77 log '1 and 2'
77 log '1 and 2'
78 log '1&2'
78 log '1&2'
79 try '1&2|3' # precedence - and is higher
79 try '1&2|3' # precedence - and is higher
80 try '1|2&3'
80 try '1|2&3'
81 try '1&2&3' # associativity
81 try '1&2&3' # associativity
82 try '1|(2|3)'
82 try '1|(2|3)'
83 log '1.0' # tag
83 log '1.0' # tag
84 log 'a' # branch
84 log 'a' # branch
85 log '2785f51ee'
85 log '2785f51ee'
86 log 'date(2005)'
86 log 'date(2005)'
87 log 'date(this is a test)'
87 log 'date(this is a test)'
88 log 'date()'
88 log 'date()'
89 log 'date'
89 log 'date'
90 log 'date('
90 log 'date('
91 log 'date(tip)'
91 log 'date(tip)'
92 log '"date"'
92 log '"date"'
93 log 'date(2005) and 1::'
93 log 'date(2005) and 1::'
94
94
95 log 'ancestor(1)'
95 log 'ancestor(1)'
96 log 'ancestor(4,5)'
96 log 'ancestor(4,5)'
97 log 'ancestor(4,5) and 4'
97 log 'ancestor(4,5) and 4'
98 log 'ancestors(5)'
98 log 'ancestors(5)'
99 log 'author(bob)'
99 log 'author(bob)'
100 log 'branch(Γ©)'
100 log 'branch(Γ©)'
101 log 'children(ancestor(4,5))'
101 log 'children(ancestor(4,5))'
102 log 'closed()'
102 log 'closed()'
103 log 'contains(a)'
103 log 'contains(a)'
104 log 'descendants(2 or 3)'
104 log 'descendants(2 or 3)'
105 log 'file(b)'
105 log 'file(b)'
106 log 'follow()'
106 log 'follow()'
107 log 'grep("issue\d+")'
107 log 'grep("issue\d+")'
108 log 'head()'
108 log 'head()'
109 log 'heads(6::)'
109 log 'heads(6::)'
110 log 'keyword(issue)'
110 log 'keyword(issue)'
111 log 'limit(head(), 1)'
111 log 'limit(head(), 1)'
112 log 'max(contains(a))'
112 log 'max(contains(a))'
113 log 'min(contains(a))'
113 log 'merge()'
114 log 'merge()'
114 log 'modifies(b)'
115 log 'modifies(b)'
115 log 'p1(merge())'
116 log 'p1(merge())'
116 log 'p2(merge())'
117 log 'p2(merge())'
117 log 'parents(merge())'
118 log 'parents(merge())'
118 log 'removes(a)'
119 log 'removes(a)'
119 log 'roots(all())'
120 log 'roots(all())'
120 log 'reverse(2 or 3 or 4 or 5)'
121 log 'reverse(2 or 3 or 4 or 5)'
121 log 'sort(limit(reverse(all()), 3))'
122 log 'sort(limit(reverse(all()), 3))'
122 log 'sort(2 or 3 or 4 or 5, date)'
123 log 'sort(2 or 3 or 4 or 5, date)'
123 log 'tagged()'
124 log 'tagged()'
124 log 'user(bob)'
125 log 'user(bob)'
125
126
126 log '4::8'
127 log '4::8'
127 log '4:8'
128 log '4:8'
128
129
129 log 'sort(!merge() & (modifies(b) | user(bob) | keyword(bug) | keyword(issue) & 1::9), "-date")'
130 log 'sort(!merge() & (modifies(b) | user(bob) | keyword(bug) | keyword(issue) & 1::9), "-date")'
130
131
131 log 'not 0 and 0:2'
132 log 'not 0 and 0:2'
132 log 'not 1 and 0:2'
133 log 'not 1 and 0:2'
133 log 'not 2 and 0:2'
134 log 'not 2 and 0:2'
134 log '(1 and 2)::'
135 log '(1 and 2)::'
135 log '(1 and 2):'
136 log '(1 and 2):'
136 log '(1 and 2):3'
137 log '(1 and 2):3'
137 log 'sort(head(), -rev)'
138 log 'sort(head(), -rev)'
@@ -1,223 +1,225 b''
1 marked working directory as branch a
1 marked working directory as branch a
2 marked working directory as branch b
2 marked working directory as branch b
3 marked working directory as branch a-b-c-
3 marked working directory as branch a-b-c-
4 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
4 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
5 marked working directory as branch +a+b+c+
5 marked working directory as branch +a+b+c+
6 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
6 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
7 marked working directory as branch -a-b-c-
7 marked working directory as branch -a-b-c-
8 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
8 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
9 marked working directory as branch /a/b/c/
9 marked working directory as branch /a/b/c/
10 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
10 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
11 (branch merge, don't forget to commit)
11 (branch merge, don't forget to commit)
12 marked working directory as branch _a_b_c_
12 marked working directory as branch _a_b_c_
13 marked working directory as branch .a.b.c.
13 marked working directory as branch .a.b.c.
14 marked working directory as branch all
14 marked working directory as branch all
15 abort: can only close branch heads
15 abort: can only close branch heads
16 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
16 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
17 marked working directory as branch Γ©
17 marked working directory as branch Γ©
18 % hg debugrevspec a
18 % hg debugrevspec a
19 ('symbol', 'a')
19 ('symbol', 'a')
20 0
20 0
21 % hg debugrevspec b-a
21 % hg debugrevspec b-a
22 ('minus', ('symbol', 'b'), ('symbol', 'a'))
22 ('minus', ('symbol', 'b'), ('symbol', 'a'))
23 1
23 1
24 % hg debugrevspec _a_b_c_
24 % hg debugrevspec _a_b_c_
25 ('symbol', '_a_b_c_')
25 ('symbol', '_a_b_c_')
26 6
26 6
27 % hg debugrevspec _a_b_c_-a
27 % hg debugrevspec _a_b_c_-a
28 ('minus', ('symbol', '_a_b_c_'), ('symbol', 'a'))
28 ('minus', ('symbol', '_a_b_c_'), ('symbol', 'a'))
29 6
29 6
30 % hg debugrevspec .a.b.c.
30 % hg debugrevspec .a.b.c.
31 ('symbol', '.a.b.c.')
31 ('symbol', '.a.b.c.')
32 7
32 7
33 % hg debugrevspec .a.b.c.-a
33 % hg debugrevspec .a.b.c.-a
34 ('minus', ('symbol', '.a.b.c.'), ('symbol', 'a'))
34 ('minus', ('symbol', '.a.b.c.'), ('symbol', 'a'))
35 7
35 7
36 % hg debugrevspec -- -a-b-c-
36 % hg debugrevspec -- -a-b-c-
37 hg: parse error at 7: not a prefix: end
37 hg: parse error at 7: not a prefix: end
38 % log '-a-b-c-'
38 % log '-a-b-c-'
39 4
39 4
40 % hg debugrevspec -- -a-b-c--a
40 % hg debugrevspec -- -a-b-c--a
41 ('minus', ('minus', ('minus', ('negate', ('symbol', 'a')), ('symbol', 'b')), ('symbol', 'c')), ('negate', ('symbol', 'a')))
41 ('minus', ('minus', ('minus', ('negate', ('symbol', 'a')), ('symbol', 'b')), ('symbol', 'c')), ('negate', ('symbol', 'a')))
42 abort: unknown revision '-a'!
42 abort: unknown revision '-a'!
43 % hg debugrevspec Γ©
43 % hg debugrevspec Γ©
44 ('symbol', '\xc3\xa9')
44 ('symbol', '\xc3\xa9')
45 9
45 9
46 % hg debugrevspec "-a-b-c-"-a
46 % hg debugrevspec "-a-b-c-"-a
47 ('minus', ('string', '-a-b-c-'), ('symbol', 'a'))
47 ('minus', ('string', '-a-b-c-'), ('symbol', 'a'))
48 4
48 4
49 % log '1 or 2'
49 % log '1 or 2'
50 1
50 1
51 2
51 2
52 % log '1|2'
52 % log '1|2'
53 1
53 1
54 2
54 2
55 % log '1 and 2'
55 % log '1 and 2'
56 % log '1&2'
56 % log '1&2'
57 % hg debugrevspec 1&2|3
57 % hg debugrevspec 1&2|3
58 ('or', ('and', ('symbol', '1'), ('symbol', '2')), ('symbol', '3'))
58 ('or', ('and', ('symbol', '1'), ('symbol', '2')), ('symbol', '3'))
59 3
59 3
60 % hg debugrevspec 1|2&3
60 % hg debugrevspec 1|2&3
61 ('or', ('symbol', '1'), ('and', ('symbol', '2'), ('symbol', '3')))
61 ('or', ('symbol', '1'), ('and', ('symbol', '2'), ('symbol', '3')))
62 1
62 1
63 % hg debugrevspec 1&2&3
63 % hg debugrevspec 1&2&3
64 ('and', ('and', ('symbol', '1'), ('symbol', '2')), ('symbol', '3'))
64 ('and', ('and', ('symbol', '1'), ('symbol', '2')), ('symbol', '3'))
65 % hg debugrevspec 1|(2|3)
65 % hg debugrevspec 1|(2|3)
66 ('or', ('symbol', '1'), ('group', ('or', ('symbol', '2'), ('symbol', '3'))))
66 ('or', ('symbol', '1'), ('group', ('or', ('symbol', '2'), ('symbol', '3'))))
67 1
67 1
68 2
68 2
69 3
69 3
70 % log '1.0'
70 % log '1.0'
71 6
71 6
72 % log 'a'
72 % log 'a'
73 0
73 0
74 % log '2785f51ee'
74 % log '2785f51ee'
75 0
75 0
76 % log 'date(2005)'
76 % log 'date(2005)'
77 4
77 4
78 % log 'date(this is a test)'
78 % log 'date(this is a test)'
79 hg: parse error at 10: unexpected token: symbol
79 hg: parse error at 10: unexpected token: symbol
80 % log 'date()'
80 % log 'date()'
81 hg: parse error: date wants a string
81 hg: parse error: date wants a string
82 % log 'date'
82 % log 'date'
83 hg: parse error: can't use date here
83 hg: parse error: can't use date here
84 % log 'date('
84 % log 'date('
85 hg: parse error at 5: not a prefix: end
85 hg: parse error at 5: not a prefix: end
86 % log 'date(tip)'
86 % log 'date(tip)'
87 abort: invalid date: 'tip'
87 abort: invalid date: 'tip'
88 % log '"date"'
88 % log '"date"'
89 abort: unknown revision 'date'!
89 abort: unknown revision 'date'!
90 % log 'date(2005) and 1::'
90 % log 'date(2005) and 1::'
91 4
91 4
92 % log 'ancestor(1)'
92 % log 'ancestor(1)'
93 hg: parse error: ancestor wants two arguments
93 hg: parse error: ancestor wants two arguments
94 % log 'ancestor(4,5)'
94 % log 'ancestor(4,5)'
95 1
95 1
96 % log 'ancestor(4,5) and 4'
96 % log 'ancestor(4,5) and 4'
97 % log 'ancestors(5)'
97 % log 'ancestors(5)'
98 0
98 0
99 1
99 1
100 3
100 3
101 5
101 5
102 % log 'author(bob)'
102 % log 'author(bob)'
103 2
103 2
104 % log 'branch(Γ©)'
104 % log 'branch(Γ©)'
105 8
105 8
106 9
106 9
107 % log 'children(ancestor(4,5))'
107 % log 'children(ancestor(4,5))'
108 2
108 2
109 3
109 3
110 % log 'closed()'
110 % log 'closed()'
111 % log 'contains(a)'
111 % log 'contains(a)'
112 0
112 0
113 1
113 1
114 3
114 3
115 5
115 5
116 % log 'descendants(2 or 3)'
116 % log 'descendants(2 or 3)'
117 2
117 2
118 3
118 3
119 4
119 4
120 5
120 5
121 6
121 6
122 7
122 7
123 8
123 8
124 9
124 9
125 % log 'file(b)'
125 % log 'file(b)'
126 1
126 1
127 4
127 4
128 % log 'follow()'
128 % log 'follow()'
129 0
129 0
130 1
130 1
131 2
131 2
132 4
132 4
133 8
133 8
134 9
134 9
135 % log 'grep("issue\d+")'
135 % log 'grep("issue\d+")'
136 6
136 6
137 % log 'head()'
137 % log 'head()'
138 0
138 0
139 1
139 1
140 2
140 2
141 3
141 3
142 4
142 4
143 5
143 5
144 6
144 6
145 7
145 7
146 9
146 9
147 % log 'heads(6::)'
147 % log 'heads(6::)'
148 7
148 7
149 % log 'keyword(issue)'
149 % log 'keyword(issue)'
150 6
150 6
151 % log 'limit(head(), 1)'
151 % log 'limit(head(), 1)'
152 0
152 0
153 % log 'max(contains(a))'
153 % log 'max(contains(a))'
154 5
154 5
155 % log 'min(contains(a))'
156 0
155 % log 'merge()'
157 % log 'merge()'
156 6
158 6
157 % log 'modifies(b)'
159 % log 'modifies(b)'
158 4
160 4
159 % log 'p1(merge())'
161 % log 'p1(merge())'
160 5
162 5
161 % log 'p2(merge())'
163 % log 'p2(merge())'
162 4
164 4
163 % log 'parents(merge())'
165 % log 'parents(merge())'
164 4
166 4
165 5
167 5
166 % log 'removes(a)'
168 % log 'removes(a)'
167 2
169 2
168 6
170 6
169 % log 'roots(all())'
171 % log 'roots(all())'
170 0
172 0
171 % log 'reverse(2 or 3 or 4 or 5)'
173 % log 'reverse(2 or 3 or 4 or 5)'
172 5
174 5
173 4
175 4
174 3
176 3
175 2
177 2
176 % log 'sort(limit(reverse(all()), 3))'
178 % log 'sort(limit(reverse(all()), 3))'
177 7
179 7
178 8
180 8
179 9
181 9
180 % log 'sort(2 or 3 or 4 or 5, date)'
182 % log 'sort(2 or 3 or 4 or 5, date)'
181 2
183 2
182 3
184 3
183 5
185 5
184 4
186 4
185 % log 'tagged()'
187 % log 'tagged()'
186 6
188 6
187 % log 'user(bob)'
189 % log 'user(bob)'
188 2
190 2
189 % log '4::8'
191 % log '4::8'
190 4
192 4
191 8
193 8
192 % log '4:8'
194 % log '4:8'
193 4
195 4
194 5
196 5
195 6
197 6
196 7
198 7
197 8
199 8
198 % log 'sort(!merge() & (modifies(b) | user(bob) | keyword(bug) | keyword(issue) & 1::9), "-date")'
200 % log 'sort(!merge() & (modifies(b) | user(bob) | keyword(bug) | keyword(issue) & 1::9), "-date")'
199 4
201 4
200 2
202 2
201 5
203 5
202 % log 'not 0 and 0:2'
204 % log 'not 0 and 0:2'
203 1
205 1
204 2
206 2
205 % log 'not 1 and 0:2'
207 % log 'not 1 and 0:2'
206 0
208 0
207 2
209 2
208 % log 'not 2 and 0:2'
210 % log 'not 2 and 0:2'
209 0
211 0
210 1
212 1
211 % log '(1 and 2)::'
213 % log '(1 and 2)::'
212 % log '(1 and 2):'
214 % log '(1 and 2):'
213 % log '(1 and 2):3'
215 % log '(1 and 2):3'
214 % log 'sort(head(), -rev)'
216 % log 'sort(head(), -rev)'
215 9
217 9
216 7
218 7
217 6
219 6
218 5
220 5
219 4
221 4
220 3
222 3
221 2
223 2
222 1
224 1
223 0
225 0
General Comments 0
You need to be logged in to leave comments. Login now