dirstate: warn on invalid parents rather than aborting...
Matt Mackall
r13032:e41e2b79 default
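The dirstate hunk below adds a `validate` callable to the dirstate constructor and applies it in `parents()`; the repository-side wiring that supplies the callable is outside this changeset view. As a rough sketch of what such a hook could look like, consistent with the commit message (warn about an invalid working parent instead of aborting) — the helper name `_makevalidate`, the one-shot `warned` flag, and the exact warning text are illustrative assumptions, not the committed code:

from mercurial import error
from mercurial.i18n import _
from mercurial.node import nullid, short

def _makevalidate(repo):
    '''Illustrative sketch: build a parent-validating callable for dirstate.

    The returned function maps a recorded working-directory parent to
    itself when it exists in the changelog, and to nullid (with a one-time
    warning) when it does not -- warning rather than aborting.
    '''
    warned = [False]  # mutable cell so the closure can set it
    def validate(node):
        try:
            repo.changelog.rev(node)  # raises LookupError if unknown
            return node
        except error.LookupError:
            if not warned[0]:
                warned[0] = True
                repo.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid
    return validate

# the repository would then construct its dirstate roughly as:
#   dirstate.dirstate(opener, ui, root, _makevalidate(repo))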
@@ -1,573 +1,571 @@
# Mercurial extension to provide the 'hg bookmark' command
#
# Copyright 2008 David Soria Parra <dsp@php.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''track a line of development with movable markers

Bookmarks are local movable markers to changesets. Every bookmark
points to a changeset identified by its hash. If you commit a
changeset that is based on a changeset that has a bookmark on it, the
bookmark shifts to the new changeset.

It is possible to use bookmark names in every revision lookup (e.g.
:hg:`merge`, :hg:`update`).

By default, when several bookmarks point to the same changeset, they
will all move forward together. It is possible to obtain a more
git-like experience by adding the following configuration option to
your configuration file::

  [bookmarks]
  track.current = True

This will cause Mercurial to track the bookmark that you are currently
using, and only update it. This is similar to git's approach to
branching.
'''

from mercurial.i18n import _
from mercurial.node import nullid, nullrev, bin, hex, short
from mercurial import util, commands, repair, extensions, pushkey, hg, url
from mercurial import revset
import os

def write(repo):
    '''Write bookmarks

    Write the given bookmark => hash dictionary to the .hg/bookmarks file
    in a format equal to those of localtags.

    We also store a backup of the previous state in undo.bookmarks that
    can be copied back on rollback.
    '''
    refs = repo._bookmarks
    if os.path.exists(repo.join('bookmarks')):
        util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks'))
    if repo._bookmarkcurrent not in refs:
        setcurrent(repo, None)
    wlock = repo.wlock()
    try:
        file = repo.opener('bookmarks', 'w', atomictemp=True)
        for refspec, node in refs.iteritems():
            file.write("%s %s\n" % (hex(node), refspec))
        file.rename()

        # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
        try:
            os.utime(repo.sjoin('00changelog.i'), None)
        except OSError:
            pass

    finally:
        wlock.release()

def setcurrent(repo, mark):
    '''Set the name of the bookmark that we are currently on

    Set the name of the bookmark that we are on (hg update <bookmark>).
    The name is recorded in .hg/bookmarks.current
    '''
    current = repo._bookmarkcurrent
    if current == mark:
        return

    refs = repo._bookmarks

    # do not update if we do update to a rev equal to the current bookmark
    if (mark and mark not in refs and
        current and refs[current] == repo.changectx('.').node()):
        return
    if mark not in refs:
        mark = ''
    wlock = repo.wlock()
    try:
        file = repo.opener('bookmarks.current', 'w', atomictemp=True)
        file.write(mark)
        file.rename()
    finally:
        wlock.release()
    repo._bookmarkcurrent = mark

def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
    '''track a line of development with movable markers

    Bookmarks are pointers to certain commits that move when
    committing. Bookmarks are local. They can be renamed, copied and
    deleted. It is possible to use bookmark names in :hg:`merge` and
    :hg:`update` to merge and update respectively to a given bookmark.

    You can use :hg:`bookmark NAME` to set a bookmark on the working
    directory's parent revision with the given name. If you specify
    a revision using -r REV (where REV may be an existing bookmark),
    the bookmark is assigned to that revision.

    Bookmarks can be pushed and pulled between repositories (see :hg:`help
    push` and :hg:`help pull`). This requires the bookmark extension to be
    enabled for both the local and remote repositories.
    '''
    hexfn = ui.debugflag and hex or short
    marks = repo._bookmarks
    cur = repo.changectx('.').node()

    if rename:
        if rename not in marks:
            raise util.Abort(_("a bookmark of this name does not exist"))
        if mark in marks and not force:
            raise util.Abort(_("a bookmark of the same name already exists"))
        if mark is None:
            raise util.Abort(_("new bookmark name required"))
        marks[mark] = marks[rename]
        del marks[rename]
        if repo._bookmarkcurrent == rename:
            setcurrent(repo, mark)
        write(repo)
        return

    if delete:
        if mark is None:
            raise util.Abort(_("bookmark name required"))
        if mark not in marks:
            raise util.Abort(_("a bookmark of this name does not exist"))
        if mark == repo._bookmarkcurrent:
            setcurrent(repo, None)
        del marks[mark]
        write(repo)
        return

    if mark is not None:
        if "\n" in mark:
            raise util.Abort(_("bookmark name cannot contain newlines"))
        mark = mark.strip()
        if not mark:
            raise util.Abort(_("bookmark names cannot consist entirely of "
                               "whitespace"))
        if mark in marks and not force:
            raise util.Abort(_("a bookmark of the same name already exists"))
        if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
            and not force):
            raise util.Abort(
                _("a bookmark cannot have the name of an existing branch"))
        if rev:
            marks[mark] = repo.lookup(rev)
        else:
            marks[mark] = repo.changectx('.').node()
        setcurrent(repo, mark)
        write(repo)
        return

    if mark is None:
        if rev:
            raise util.Abort(_("bookmark name required"))
        if len(marks) == 0:
            ui.status(_("no bookmarks set\n"))
        else:
            for bmark, n in marks.iteritems():
                if ui.configbool('bookmarks', 'track.current'):
                    current = repo._bookmarkcurrent
                    if bmark == current and n == cur:
                        prefix, label = '*', 'bookmarks.current'
                    else:
                        prefix, label = ' ', ''
                else:
                    if n == cur:
                        prefix, label = '*', 'bookmarks.current'
                    else:
                        prefix, label = ' ', ''

                if ui.quiet:
                    ui.write("%s\n" % bmark, label=label)
                else:
                    ui.write(" %s %-25s %d:%s\n" % (
                        prefix, bmark, repo.changelog.rev(n), hexfn(n)),
                        label=label)
        return
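# Illustrative usage of the command defined above (matching the synopsis
# 'hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]' in cmdtable below):
#   hg bookmarks                  # list bookmarks
#   hg bookmarks feature          # bookmark the working directory parent
#   hg bookmarks -r 42 feature    # assign 'feature' to revision 42
#   hg bookmarks -m feature done  # rename 'feature' to 'done'
#   hg bookmarks -d done          # delete 'done'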

def _revstostrip(changelog, node):
    srev = changelog.rev(node)
    tostrip = [srev]
    saveheads = []
    for r in xrange(srev, len(changelog)):
        parents = changelog.parentrevs(r)
        if parents[0] in tostrip or parents[1] in tostrip:
            tostrip.append(r)
            if parents[1] != nullrev:
                for p in parents:
                    if p not in tostrip and p > srev:
                        saveheads.append(p)
    return [r for r in tostrip if r not in saveheads]

def strip(oldstrip, ui, repo, node, backup="all"):
    """Strip bookmarks if revisions are stripped using
    the mercurial.strip method. This usually happens during
    qpush and qpop"""
    revisions = _revstostrip(repo.changelog, node)
    marks = repo._bookmarks
    update = []
    for mark, n in marks.iteritems():
        if repo.changelog.rev(n) in revisions:
            update.append(mark)
    oldstrip(ui, repo, node, backup)
    if len(update) > 0:
        for m in update:
            marks[m] = repo.changectx('.').node()
        write(repo)

def reposetup(ui, repo):
    if not repo.local():
        return

    class bookmark_repo(repo.__class__):

        @util.propertycache
        def _bookmarks(self):
            '''Parse .hg/bookmarks file and return a dictionary

            Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
            in the .hg/bookmarks file.
            Read the file and return a (name=>nodeid) dictionary
            '''
            try:
                bookmarks = {}
                for line in self.opener('bookmarks'):
                    sha, refspec = line.strip().split(' ', 1)
                    bookmarks[refspec] = self.changelog.lookup(sha)
            except:
                pass
            return bookmarks

        @util.propertycache
        def _bookmarkcurrent(self):
            '''Get the current bookmark

            If we use gittish branches we have a current bookmark that
            we are on. This function returns the name of the bookmark. It
            is stored in .hg/bookmarks.current
            '''
            mark = None
            if os.path.exists(self.join('bookmarks.current')):
                file = self.opener('bookmarks.current')
                # No readline() in posixfile_nt, reading everything is cheap
                mark = (file.readlines() or [''])[0]
                if mark == '':
                    mark = None
                file.close()
            return mark

        def rollback(self, *args):
            if os.path.exists(self.join('undo.bookmarks')):
                util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
            return super(bookmark_repo, self).rollback(*args)

        def lookup(self, key):
            if key in self._bookmarks:
                key = self._bookmarks[key]
            return super(bookmark_repo, self).lookup(key)

        def _bookmarksupdate(self, parents, node):
            marks = self._bookmarks
            update = False
            if ui.configbool('bookmarks', 'track.current'):
                mark = self._bookmarkcurrent
                if mark and marks[mark] in parents:
                    marks[mark] = node
                    update = True
            else:
                for mark, n in marks.items():
                    if n in parents:
                        marks[mark] = node
                        update = True
            if update:
                write(self)

        def commitctx(self, ctx, error=False):
            """Add a revision to the repository and
            move the bookmark"""
            wlock = self.wlock() # do both commit and bookmark with lock held
            try:
                node = super(bookmark_repo, self).commitctx(ctx, error)
                if node is None:
                    return None
                parents = self.changelog.parents(node)
                if parents[1] == nullid:
                    parents = (parents[0],)

                self._bookmarksupdate(parents, node)
                return node
            finally:
                wlock.release()

        def pull(self, remote, heads=None, force=False):
            result = super(bookmark_repo, self).pull(remote, heads, force)

            self.ui.debug("checking for updated bookmarks\n")
            rb = remote.listkeys('bookmarks')
            changed = False
            for k in rb.keys():
                if k in self._bookmarks:
                    nr, nl = rb[k], self._bookmarks[k]
                    if nr in self:
                        cr = self[nr]
                        cl = self[nl]
                        if cl.rev() >= cr.rev():
                            continue
                        if cr in cl.descendants():
                            self._bookmarks[k] = cr.node()
                            changed = True
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_("not updating divergent"
                                           " bookmark %s\n") % k)
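            # only fast-forward moves are taken above: a remote bookmark that
            # is not a descendant of the local one is left alone and reported
            # as divergent, presumably to avoid losing local movement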
            if changed:
                write(repo)

            return result

        def push(self, remote, force=False, revs=None, newbranch=False):
            result = super(bookmark_repo, self).push(remote, force, revs,
                                                     newbranch)

            self.ui.debug("checking for updated bookmarks\n")
            rb = remote.listkeys('bookmarks')
            for k in rb.keys():
                if k in self._bookmarks:
                    nr, nl = rb[k], self._bookmarks[k]
                    if nr in self:
                        cr = self[nr]
                        cl = self[nl]
                        if cl in cr.descendants():
                            r = remote.pushkey('bookmarks', k, nr, nl)
                            if r:
                                self.ui.status(_("updating bookmark %s\n") % k)
                            else:
                                self.ui.warn(_('updating bookmark %s'
                                               ' failed!\n') % k)

            return result

        def addchangegroup(self, *args, **kwargs):
-            parents = self.dirstate.parents()
-
            result = super(bookmark_repo, self).addchangegroup(*args, **kwargs)
            if result > 1:
                # We have more heads than before
                return result
            node = self.changelog.tip()
-
+            parents = self.dirstate.parents()
            self._bookmarksupdate(parents, node)
            return result
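            # The dirstate.parents() call was moved to after the changegroup
            # is applied, presumably so that, with the validating parents()
            # introduced in the dirstate hunk below, a working parent that
            # only arrives with the incoming changegroup is not flagged as
            # unknown.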

        def _findtags(self):
            """Merge bookmarks with normal tags"""
            (tags, tagtypes) = super(bookmark_repo, self)._findtags()
            tags.update(self._bookmarks)
            return (tags, tagtypes)

        if hasattr(repo, 'invalidate'):
            def invalidate(self):
                super(bookmark_repo, self).invalidate()
                for attr in ('_bookmarks', '_bookmarkcurrent'):
                    if attr in self.__dict__:
                        delattr(self, attr)

    repo.__class__ = bookmark_repo

def listbookmarks(repo):
    # We may try to list bookmarks on a repo type that does not
    # support it (e.g., statichttprepository).
    if not hasattr(repo, '_bookmarks'):
        return {}

    d = {}
    for k, v in repo._bookmarks.iteritems():
        d[k] = hex(v)
    return d

def pushbookmark(repo, key, old, new):
    w = repo.wlock()
    try:
        marks = repo._bookmarks
        if hex(marks.get(key, '')) != old:
            return False
        if new == '':
            del marks[key]
        else:
            if new not in repo:
                return False
            marks[key] = repo[new].node()
        write(repo)
        return True
    finally:
        w.release()

def pull(oldpull, ui, repo, source="default", **opts):
    # translate bookmark args to rev args for actual pull
    if opts.get('bookmark'):
        # this is an unpleasant hack as pull will do this internally
        source, branches = hg.parseurl(ui.expandpath(source),
                                       opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), source)
        rb = other.listkeys('bookmarks')

        for b in opts['bookmark']:
            if b not in rb:
                raise util.Abort(_('remote bookmark %s not found!') % b)
            opts.setdefault('rev', []).append(b)

    result = oldpull(ui, repo, source, **opts)

    # update specified bookmarks
    if opts.get('bookmark'):
        for b in opts['bookmark']:
            # explicit pull overrides local bookmark if any
            ui.status(_("importing bookmark %s\n") % b)
            repo._bookmarks[b] = repo[rb[b]].node()
        write(repo)

    return result

def push(oldpush, ui, repo, dest=None, **opts):
    dopush = True
    if opts.get('bookmark'):
        dopush = False
        for b in opts['bookmark']:
            if b in repo._bookmarks:
                dopush = True
                opts.setdefault('rev', []).append(b)

    result = 0
    if dopush:
        result = oldpush(ui, repo, dest, **opts)

    if opts.get('bookmark'):
        # this is an unpleasant hack as push will do this internally
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), dest)
        rb = other.listkeys('bookmarks')
        for b in opts['bookmark']:
            # explicit push overrides remote bookmark if any
            if b in repo._bookmarks:
                ui.status(_("exporting bookmark %s\n") % b)
                new = repo[b].hex()
            elif b in rb:
                ui.status(_("deleting remote bookmark %s\n") % b)
                new = '' # delete
            else:
                ui.warn(_('bookmark %s does not exist on the local '
                          'or remote repository!\n') % b)
                return 2
            old = rb.get(b, '')
            r = other.pushkey('bookmarks', b, old, new)
            if not r:
                ui.warn(_('updating bookmark %s failed!\n') % b)
                if not result:
                    result = 2

    return result

def diffbookmarks(ui, repo, remote):
    ui.status(_("searching for changed bookmarks\n"))

    lmarks = repo.listkeys('bookmarks')
    rmarks = remote.listkeys('bookmarks')

    diff = sorted(set(rmarks) - set(lmarks))
    for k in diff:
        ui.write(" %-25s %s\n" % (k, rmarks[k][:12]))

    if len(diff) <= 0:
        ui.status(_("no changed bookmarks found\n"))
        return 1
    return 0

def incoming(oldincoming, ui, repo, source="default", **opts):
    if opts.get('bookmarks'):
        source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), source)
        ui.status(_('comparing with %s\n') % url.hidepassword(source))
        return diffbookmarks(ui, repo, other)
    else:
        return oldincoming(ui, repo, source, **opts)

def outgoing(oldoutgoing, ui, repo, dest=None, **opts):
    if opts.get('bookmarks'):
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), dest)
        ui.status(_('comparing with %s\n') % url.hidepassword(dest))
        return diffbookmarks(ui, other, repo)
    else:
        return oldoutgoing(ui, repo, dest, **opts)

def uisetup(ui):
    extensions.wrapfunction(repair, "strip", strip)
    if ui.configbool('bookmarks', 'track.current'):
        extensions.wrapcommand(commands.table, 'update', updatecurbookmark)

    entry = extensions.wrapcommand(commands.table, 'pull', pull)
    entry[1].append(('B', 'bookmark', [],
                     _("bookmark to import"),
                     _('BOOKMARK')))
    entry = extensions.wrapcommand(commands.table, 'push', push)
    entry[1].append(('B', 'bookmark', [],
                     _("bookmark to export"),
                     _('BOOKMARK')))
    entry = extensions.wrapcommand(commands.table, 'incoming', incoming)
    entry[1].append(('B', 'bookmarks', False,
                     _("compare bookmark")))
    entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
    entry[1].append(('B', 'bookmarks', False,
                     _("compare bookmark")))

    pushkey.register('bookmarks', pushbookmark, listbookmarks)

def updatecurbookmark(orig, ui, repo, *args, **opts):
    '''Set the current bookmark

    If the user updates to a bookmark we update the .hg/bookmarks.current
    file.
    '''
    res = orig(ui, repo, *args, **opts)
    rev = opts['rev']
    if not rev and len(args) > 0:
        rev = args[0]
    setcurrent(repo, rev)
    return res

def bmrevset(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.
    """
    # i18n: "bookmark" is a keyword
    args = revset.getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = revset.getstring(args[0],
                              # i18n: "bookmark" is a keyword
                              _('the argument to bookmark must be a string'))
        bmrev = listbookmarks(repo).get(bm, None)
        if bmrev:
            bmrev = repo.changelog.rev(bin(bmrev))
        return [r for r in subset if r == bmrev]
    bms = set([repo.changelog.rev(bin(r)) for r in listbookmarks(repo).values()])
    return [r for r in subset if r in bms]

def extsetup(ui):
    revset.symbols['bookmark'] = bmrevset

cmdtable = {
    "bookmarks":
        (bookmark,
         [('f', 'force', False, _('force')),
          ('r', 'rev', '', _('revision'), _('REV')),
          ('d', 'delete', False, _('delete a given bookmark')),
          ('m', 'rename', '', _('rename a given bookmark'), _('NAME'))],
         _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
}

colortable = {'bookmarks.current': 'green'}

# tell hggettext to extract docstrings from these functions:
i18nfunctions = [bmrevset]
@@ -1,681 +1,682 @@
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullid
from i18n import _
import util, ignore, osutil, parsers
import struct, os, stat, errno
import cStringIO

_format = ">cllll"
propertycache = util.propertycache

def _finddirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)
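# e.g. _finddirs('a/b/c') yields 'a/b', then 'a': the ancestor directories
# of a root-relative path, deepest first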

def _incdirs(dirs, path):
    for base in _finddirs(path):
        if base in dirs:
            dirs[base] += 1
            return
        dirs[base] = 1

def _decdirs(dirs, path):
    for base in _finddirs(path):
        if dirs[base] > 1:
            dirs[base] -= 1
            return
        del dirs[base]

class dirstate(object):

-    def __init__(self, opener, ui, root):
+    def __init__(self, opener, ui, root, validate):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
+        self._validate = validate
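        # _validate: callable supplied by the repository that checks a
        # recorded parent node and returns a usable one (see parents() below)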
        self._root = root
        self._rootdir = os.path.join(root, '')
        self._dirty = False
        self._dirtypl = False
        self._ui = ui

    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        self._read()
        return self._map

    @propertycache
    def _copymap(self):
        self._read()
        return self._copymap

    @propertycache
    def _foldmap(self):
        f = {}
        for name in self._map:
            f[os.path.normcase(name)] = name
        return f

    @propertycache
    def _branch(self):
        try:
            return self._opener("branch").read().strip() or "default"
        except IOError:
            return "default"

    @propertycache
    def _pl(self):
        try:
            st = self._opener("dirstate").read(40)
            l = len(st)
            if l == 40:
                return st[:20], st[20:40]
            elif l > 0 and l < 40:
                raise util.Abort(_('working directory state appears damaged!'))
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
        return [nullid, nullid]

    @propertycache
    def _dirs(self):
        dirs = {}
        for f, s in self._map.iteritems():
            if s[0] != 'r':
                _incdirs(dirs, f)
        return dirs

    @propertycache
    def _ignore(self):
        files = [self._join('.hgignore')]
        for name, path in self._ui.configitems("ui"):
            if name == 'ignore' or name.startswith('ignore.'):
                files.append(util.expandpath(path))
        return ignore.ignore(self._root, files, self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool('ui', 'slash') and os.sep != '/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return util.checkexec(self._root)

    @propertycache
    def _checkcase(self):
        return not util.checkcase(self._join('.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, fallback):
        if self._checklink:
            if self._checkexec:
                def f(x):
                    p = self._join(x)
                    if os.path.islink(p):
                        return 'l'
                    if util.is_exec(p):
                        return 'x'
                    return ''
                return f
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.is_exec(self._join(x)):
                    return 'x'
                return ''
            return f
        return fallback

    def getcwd(self):
        cwd = os.getcwd()
        if cwd == self._root:
            return ''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += os.sep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep):]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.normpath(path)
        return path

    def __getitem__(self, key):
        '''Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked
        '''
        return self._map.get(key, ("?",))[0]

    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        for x in sorted(self._map):
            yield x

    def parents(self):
-        return self._pl
+        return [self._validate(p) for p in self._pl]
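        # each stored parent now goes through the validate hook, so (per the
        # commit message) an invalid parent can be warned about and replaced
        # rather than aborting later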

    def branch(self):
        return self._branch

    def setparents(self, p1, p2=nullid):
        self._dirty = self._dirtypl = True
        self._pl = p1, p2

    def setbranch(self, branch):
        if branch in ['tip', '.', 'null']:
            raise util.Abort(_('the name \'%s\' is reserved') % branch)
        self._branch = branch
        self._opener("branch", "w").write(branch + '\n')

    def _read(self):
        self._map = {}
        self._copymap = {}
        try:
            st = self._opener("dirstate").read()
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        p = parsers.parse_dirstate(self._map, self._copymap, st)
        if not self._dirtypl:
            self._pl = p

    def invalidate(self):
        for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
            if a in self.__dict__:
                delattr(self, a)
        self._dirty = False

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._copymap[dest] = source
        elif dest in self._copymap:
            del self._copymap[dest]

    def copied(self, file):
        return self._copymap.get(file, None)

    def copies(self):
        return self._copymap

    def _droppath(self, f):
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            _decdirs(self._dirs, f)

    def _addpath(self, f, check=False):
        oldstate = self[f]
        if check or oldstate == "r":
            if '\r' in f or '\n' in f:
                raise util.Abort(
                    _("'\\n' and '\\r' disallowed in filenames: %r") % f)
            if f in self._dirs:
                raise util.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in _finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise util.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            _incdirs(self._dirs, f)

    def normal(self, f):
        '''Mark a file normal and clean.'''
        self._dirty = True
        self._addpath(f)
        s = os.lstat(self._join(f))
        self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime))
        if f in self._copymap:
            del self._copymap[f]

    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    self.merge(f)
                elif entry[2] == -2:
                    self.otherparent(f)
                if source:
                    self.copy(source, f)
                return
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                return
        self._dirty = True
        self._addpath(f)
        self._map[f] = ('n', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]

    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == nullid:
            raise util.Abort(_("setting %r to other parent "
                               "only allowed in merges") % f)
        self._dirty = True
        self._addpath(f)
        self._map[f] = ('n', 0, -2, -1)
        if f in self._copymap:
            del self._copymap[f]

    def add(self, f):
        '''Mark a file added.'''
        self._dirty = True
        self._addpath(f, True)
        self._map[f] = ('a', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]

    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid and f in self._map:
            # backup the previous state
            entry = self._map[f]
            if entry[0] == 'm': # merge
                size = -1
            elif entry[0] == 'n' and entry[2] == -2: # other parent
                size = -2
        self._map[f] = ('r', 0, size, 0)
        if size == 0 and f in self._copymap:
            del self._copymap[f]

    def merge(self, f):
        '''Mark a file merged.'''
        self._dirty = True
        s = os.lstat(self._join(f))
        self._addpath(f)
        self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
        if f in self._copymap:
            del self._copymap[f]

    def forget(self, f):
        '''Forget a file.'''
        self._dirty = True
        try:
            self._droppath(f)
            del self._map[f]
        except KeyError:
            self._ui.warn(_("not in dirstate: %s\n") % f)

    def _normalize(self, path, knownpath):
        norm_path = os.path.normcase(path)
        fold_path = self._foldmap.get(norm_path, None)
        if fold_path is None:
            if knownpath or not os.path.lexists(os.path.join(self._root, path)):
                fold_path = path
            else:
                fold_path = self._foldmap.setdefault(norm_path,
                                                     util.fspath(path, self._root))
        return fold_path

    def clear(self):
        self._map = {}
        if "_dirs" in self.__dict__:
            delattr(self, "_dirs")
        self._copymap = {}
        self._pl = [nullid, nullid]
        self._dirty = True

    def rebuild(self, parent, files):
        self.clear()
        for f in files:
            if 'x' in files.flags(f):
                self._map[f] = ('n', 0777, -1, 0)
            else:
                self._map[f] = ('n', 0666, -1, 0)
        self._pl = (parent, nullid)
        self._dirty = True

    def write(self):
        if not self._dirty:
            return
        st = self._opener("dirstate", "w", atomictemp=True)

        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = int(util.fstat(st).st_mtime)

        cs = cStringIO.StringIO()
        copymap = self._copymap
        pack = struct.pack
        write = cs.write
        write("".join(self._pl))
        for f, e in self._map.iteritems():
            if e[0] == 'n' and e[3] == now:
                # The file was last modified "simultaneously" with the current
                # write to dirstate (i.e. within the same second for file-
                # systems with a granularity of 1 sec). This commonly happens
                # for at least a couple of files on 'update'.
                # The user could change the file without changing its size
                # within the same second. Invalidate the file's stat data in
                # dirstate, forcing future 'status' calls to compare the
                # contents of the file. This prevents mistakenly treating such
                # files as clean.
                e = (e[0], 0, -1, -1) # mark entry as 'unset'
                self._map[f] = e

            if f in copymap:
                f = "%s\0%s" % (f, copymap[f])
            e = pack(_format, e[0], e[1], e[2], e[3], len(f))
            write(e)
            write(f)
        st.write(cs.getvalue())
        st.rename()
        self._dirty = self._dirtypl = False

    def _dirignore(self, f):
        if f == '.':
            return False
        if self._ignore(f):
            return True
        for p in _finddirs(f):
            if self._ignore(p):
                return True
        return False

    def walk(self, match, subrepos, unknown, ignored):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).
        '''

        def fwarn(f, msg):
            self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
            return False

        def badtype(mode):
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
461 kind = _('directory')
462 kind = _('directory')
462 return _('unsupported file type (type is %s)') % kind
463 return _('unsupported file type (type is %s)') % kind
463
464
464 ignore = self._ignore
465 ignore = self._ignore
465 dirignore = self._dirignore
466 dirignore = self._dirignore
466 if ignored:
467 if ignored:
467 ignore = util.never
468 ignore = util.never
468 dirignore = util.never
469 dirignore = util.never
469 elif not unknown:
470 elif not unknown:
470 # if unknown and ignored are False, skip step 2
471 # if unknown and ignored are False, skip step 2
471 ignore = util.always
472 ignore = util.always
472 dirignore = util.always
473 dirignore = util.always
473
474
474 matchfn = match.matchfn
475 matchfn = match.matchfn
475 badfn = match.bad
476 badfn = match.bad
476 dmap = self._map
477 dmap = self._map
477 normpath = util.normpath
478 normpath = util.normpath
478 listdir = osutil.listdir
479 listdir = osutil.listdir
479 lstat = os.lstat
480 lstat = os.lstat
480 getkind = stat.S_IFMT
481 getkind = stat.S_IFMT
481 dirkind = stat.S_IFDIR
482 dirkind = stat.S_IFDIR
482 regkind = stat.S_IFREG
483 regkind = stat.S_IFREG
483 lnkkind = stat.S_IFLNK
484 lnkkind = stat.S_IFLNK
484 join = self._join
485 join = self._join
485 work = []
486 work = []
486 wadd = work.append
487 wadd = work.append
487
488
488 exact = skipstep3 = False
489 exact = skipstep3 = False
489 if matchfn == match.exact: # match.exact
490 if matchfn == match.exact: # match.exact
490 exact = True
491 exact = True
491 dirignore = util.always # skip step 2
492 dirignore = util.always # skip step 2
492 elif match.files() and not match.anypats(): # match.match, no patterns
493 elif match.files() and not match.anypats(): # match.match, no patterns
493 skipstep3 = True
494 skipstep3 = True
494
495
495 if self._checkcase:
496 if self._checkcase:
496 normalize = self._normalize
497 normalize = self._normalize
497 skipstep3 = False
498 skipstep3 = False
498 else:
499 else:
499 normalize = lambda x, y: x
500 normalize = lambda x, y: x
500
501
501 files = sorted(match.files())
502 files = sorted(match.files())
502 subrepos.sort()
503 subrepos.sort()
503 i, j = 0, 0
504 i, j = 0, 0
504 while i < len(files) and j < len(subrepos):
505 while i < len(files) and j < len(subrepos):
505 subpath = subrepos[j] + "/"
506 subpath = subrepos[j] + "/"
506 if not files[i].startswith(subpath):
507 if not files[i].startswith(subpath):
507 i += 1
508 i += 1
508 continue
509 continue
509 while files and files[i].startswith(subpath):
510 while files and files[i].startswith(subpath):
510 del files[i]
511 del files[i]
511 j += 1
512 j += 1
512
513
513 if not files or '.' in files:
514 if not files or '.' in files:
514 files = ['']
515 files = ['']
515 results = dict.fromkeys(subrepos)
516 results = dict.fromkeys(subrepos)
516 results['.hg'] = None
517 results['.hg'] = None
517
518
518 # step 1: find all explicit files
519 # step 1: find all explicit files
519 for ff in files:
520 for ff in files:
520 nf = normalize(normpath(ff), False)
521 nf = normalize(normpath(ff), False)
521 if nf in results:
522 if nf in results:
522 continue
523 continue
523
524
524 try:
525 try:
525 st = lstat(join(nf))
526 st = lstat(join(nf))
526 kind = getkind(st.st_mode)
527 kind = getkind(st.st_mode)
527 if kind == dirkind:
528 if kind == dirkind:
528 skipstep3 = False
529 skipstep3 = False
529 if nf in dmap:
530 if nf in dmap:
530 #file deleted on disk but still in dirstate
531 #file deleted on disk but still in dirstate
531 results[nf] = None
532 results[nf] = None
532 match.dir(nf)
533 match.dir(nf)
533 if not dirignore(nf):
534 if not dirignore(nf):
534 wadd(nf)
535 wadd(nf)
535 elif kind == regkind or kind == lnkkind:
536 elif kind == regkind or kind == lnkkind:
536 results[nf] = st
537 results[nf] = st
537 else:
538 else:
538 badfn(ff, badtype(kind))
539 badfn(ff, badtype(kind))
539 if nf in dmap:
540 if nf in dmap:
540 results[nf] = None
541 results[nf] = None
541 except OSError, inst:
542 except OSError, inst:
542 if nf in dmap: # does it exactly match a file?
543 if nf in dmap: # does it exactly match a file?
543 results[nf] = None
544 results[nf] = None
544 else: # does it match a directory?
545 else: # does it match a directory?
545 prefix = nf + "/"
546 prefix = nf + "/"
546 for fn in dmap:
547 for fn in dmap:
547 if fn.startswith(prefix):
548 if fn.startswith(prefix):
548 match.dir(nf)
549 match.dir(nf)
549 skipstep3 = False
550 skipstep3 = False
550 break
551 break
551 else:
552 else:
552 badfn(ff, inst.strerror)
553 badfn(ff, inst.strerror)
553
554
554 # step 2: visit subdirectories
555 # step 2: visit subdirectories
555 while work:
556 while work:
556 nd = work.pop()
557 nd = work.pop()
557 skip = None
558 skip = None
558 if nd == '.':
559 if nd == '.':
559 nd = ''
560 nd = ''
560 else:
561 else:
561 skip = '.hg'
562 skip = '.hg'
562 try:
563 try:
563 entries = listdir(join(nd), stat=True, skip=skip)
564 entries = listdir(join(nd), stat=True, skip=skip)
564 except OSError, inst:
565 except OSError, inst:
565 if inst.errno == errno.EACCES:
566 if inst.errno == errno.EACCES:
566 fwarn(nd, inst.strerror)
567 fwarn(nd, inst.strerror)
567 continue
568 continue
568 raise
569 raise
569 for f, kind, st in entries:
570 for f, kind, st in entries:
570 nf = normalize(nd and (nd + "/" + f) or f, True)
571 nf = normalize(nd and (nd + "/" + f) or f, True)
571 if nf not in results:
572 if nf not in results:
572 if kind == dirkind:
573 if kind == dirkind:
573 if not ignore(nf):
574 if not ignore(nf):
574 match.dir(nf)
575 match.dir(nf)
575 wadd(nf)
576 wadd(nf)
576 if nf in dmap and matchfn(nf):
577 if nf in dmap and matchfn(nf):
577 results[nf] = None
578 results[nf] = None
578 elif kind == regkind or kind == lnkkind:
579 elif kind == regkind or kind == lnkkind:
579 if nf in dmap:
580 if nf in dmap:
580 if matchfn(nf):
581 if matchfn(nf):
581 results[nf] = st
582 results[nf] = st
582 elif matchfn(nf) and not ignore(nf):
583 elif matchfn(nf) and not ignore(nf):
583 results[nf] = st
584 results[nf] = st
584 elif nf in dmap and matchfn(nf):
585 elif nf in dmap and matchfn(nf):
585 results[nf] = None
586 results[nf] = None
586
587
587 # step 3: report unseen items in the dmap hash
588 # step 3: report unseen items in the dmap hash
588 if not skipstep3 and not exact:
589 if not skipstep3 and not exact:
589 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
590 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
590 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
591 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
591 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
592 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
592 st = None
593 st = None
593 results[nf] = st
594 results[nf] = st
594 for s in subrepos:
595 for s in subrepos:
595 del results[s]
596 del results[s]
596 del results['.hg']
597 del results['.hg']
597 return results
598 return results
598
599
599 def status(self, match, subrepos, ignored, clean, unknown):
600 def status(self, match, subrepos, ignored, clean, unknown):
600 '''Determine the status of the working copy relative to the
601 '''Determine the status of the working copy relative to the
601 dirstate and return a tuple of lists (unsure, modified, added,
602 dirstate and return a tuple of lists (unsure, modified, added,
602 removed, deleted, unknown, ignored, clean), where:
603 removed, deleted, unknown, ignored, clean), where:
603
604
604 unsure:
605 unsure:
605 files that might have been modified since the dirstate was
606 files that might have been modified since the dirstate was
606 written, but need to be read to be sure (size is the same
607 written, but need to be read to be sure (size is the same
607 but mtime differs)
608 but mtime differs)
608 modified:
609 modified:
609 files that have definitely been modified since the dirstate
610 files that have definitely been modified since the dirstate
610 was written (different size or mode)
611 was written (different size or mode)
611 added:
612 added:
612 files that have been explicitly added with hg add
613 files that have been explicitly added with hg add
613 removed:
614 removed:
614 files that have been explicitly removed with hg remove
615 files that have been explicitly removed with hg remove
615 deleted:
616 deleted:
616 files that have been deleted through other means ("missing")
617 files that have been deleted through other means ("missing")
617 unknown:
618 unknown:
618 files not in the dirstate that are not ignored
619 files not in the dirstate that are not ignored
619 ignored:
620 ignored:
620 files not in the dirstate that are ignored
621 files not in the dirstate that are ignored
621 (by _dirignore())
622 (by _dirignore())
622 clean:
623 clean:
623 files that have definitely not been modified since the
624 files that have definitely not been modified since the
624 dirstate was written
625 dirstate was written
625 '''
626 '''
626 listignored, listclean, listunknown = ignored, clean, unknown
627 listignored, listclean, listunknown = ignored, clean, unknown
627 lookup, modified, added, unknown, ignored = [], [], [], [], []
628 lookup, modified, added, unknown, ignored = [], [], [], [], []
628 removed, deleted, clean = [], [], []
629 removed, deleted, clean = [], [], []
629
630
630 dmap = self._map
631 dmap = self._map
631 ladd = lookup.append # aka "unsure"
632 ladd = lookup.append # aka "unsure"
632 madd = modified.append
633 madd = modified.append
633 aadd = added.append
634 aadd = added.append
634 uadd = unknown.append
635 uadd = unknown.append
635 iadd = ignored.append
636 iadd = ignored.append
636 radd = removed.append
637 radd = removed.append
637 dadd = deleted.append
638 dadd = deleted.append
638 cadd = clean.append
639 cadd = clean.append
639
640
640 lnkkind = stat.S_IFLNK
641 lnkkind = stat.S_IFLNK
641
642
642 for fn, st in self.walk(match, subrepos, listunknown,
643 for fn, st in self.walk(match, subrepos, listunknown,
643 listignored).iteritems():
644 listignored).iteritems():
644 if fn not in dmap:
645 if fn not in dmap:
645 if (listignored or match.exact(fn)) and self._dirignore(fn):
646 if (listignored or match.exact(fn)) and self._dirignore(fn):
646 if listignored:
647 if listignored:
647 iadd(fn)
648 iadd(fn)
648 elif listunknown:
649 elif listunknown:
649 uadd(fn)
650 uadd(fn)
650 continue
651 continue
651
652
652 state, mode, size, time = dmap[fn]
653 state, mode, size, time = dmap[fn]
653
654
654 if not st and state in "nma":
655 if not st and state in "nma":
655 dadd(fn)
656 dadd(fn)
656 elif state == 'n':
657 elif state == 'n':
657 # The "mode & lnkkind != lnkkind or self._checklink"
658 # The "mode & lnkkind != lnkkind or self._checklink"
658 # lines are an expansion of "islink => checklink"
659 # lines are an expansion of "islink => checklink"
659 # where islink means "is this a link?" and checklink
660 # where islink means "is this a link?" and checklink
660 # means "can we check links?".
661 # means "can we check links?".
661 if (size >= 0 and
662 if (size >= 0 and
662 (size != st.st_size
663 (size != st.st_size
663 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
664 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
664 and (mode & lnkkind != lnkkind or self._checklink)
665 and (mode & lnkkind != lnkkind or self._checklink)
665 or size == -2 # other parent
666 or size == -2 # other parent
666 or fn in self._copymap):
667 or fn in self._copymap):
667 madd(fn)
668 madd(fn)
668 elif (time != int(st.st_mtime)
669 elif (time != int(st.st_mtime)
669 and (mode & lnkkind != lnkkind or self._checklink)):
670 and (mode & lnkkind != lnkkind or self._checklink)):
670 ladd(fn)
671 ladd(fn)
671 elif listclean:
672 elif listclean:
672 cadd(fn)
673 cadd(fn)
673 elif state == 'm':
674 elif state == 'm':
674 madd(fn)
675 madd(fn)
675 elif state == 'a':
676 elif state == 'a':
676 aadd(fn)
677 aadd(fn)
677 elif state == 'r':
678 elif state == 'r':
678 radd(fn)
679 radd(fn)
679
680
680 return (lookup, modified, added, removed, deleted, unknown, ignored,
681 return (lookup, modified, added, removed, deleted, unknown, ignored,
681 clean)
682 clean)
@@ -1,1904 +1,1916 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared',
25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 'dotencode'))
26 'dotencode'))
27
27
28 def __init__(self, baseui, path=None, create=0):
28 def __init__(self, baseui, path=None, create=0):
29 repo.repository.__init__(self)
29 repo.repository.__init__(self)
30 self.root = os.path.realpath(util.expandpath(path))
30 self.root = os.path.realpath(util.expandpath(path))
31 self.path = os.path.join(self.root, ".hg")
31 self.path = os.path.join(self.root, ".hg")
32 self.origroot = path
32 self.origroot = path
33 self.auditor = util.path_auditor(self.root, self._checknested)
33 self.auditor = util.path_auditor(self.root, self._checknested)
34 self.opener = util.opener(self.path)
34 self.opener = util.opener(self.path)
35 self.wopener = util.opener(self.root)
35 self.wopener = util.opener(self.root)
36 self.baseui = baseui
36 self.baseui = baseui
37 self.ui = baseui.copy()
37 self.ui = baseui.copy()
38
38
39 try:
39 try:
40 self.ui.readconfig(self.join("hgrc"), self.root)
40 self.ui.readconfig(self.join("hgrc"), self.root)
41 extensions.loadall(self.ui)
41 extensions.loadall(self.ui)
42 except IOError:
42 except IOError:
43 pass
43 pass
44
44
45 if not os.path.isdir(self.path):
45 if not os.path.isdir(self.path):
46 if create:
46 if create:
47 if not os.path.exists(path):
47 if not os.path.exists(path):
48 util.makedirs(path)
48 util.makedirs(path)
49 os.mkdir(self.path)
49 os.mkdir(self.path)
50 requirements = ["revlogv1"]
50 requirements = ["revlogv1"]
51 if self.ui.configbool('format', 'usestore', True):
51 if self.ui.configbool('format', 'usestore', True):
52 os.mkdir(os.path.join(self.path, "store"))
52 os.mkdir(os.path.join(self.path, "store"))
53 requirements.append("store")
53 requirements.append("store")
54 if self.ui.configbool('format', 'usefncache', True):
54 if self.ui.configbool('format', 'usefncache', True):
55 requirements.append("fncache")
55 requirements.append("fncache")
56 if self.ui.configbool('format', 'dotencode', True):
56 if self.ui.configbool('format', 'dotencode', True):
57 requirements.append('dotencode')
57 requirements.append('dotencode')
58 # create an invalid changelog
58 # create an invalid changelog
59 self.opener("00changelog.i", "a").write(
59 self.opener("00changelog.i", "a").write(
60 '\0\0\0\2' # represents revlogv2
60 '\0\0\0\2' # represents revlogv2
61 ' dummy changelog to prevent using the old repo layout'
61 ' dummy changelog to prevent using the old repo layout'
62 )
62 )
63 if self.ui.configbool('format', 'parentdelta', False):
63 if self.ui.configbool('format', 'parentdelta', False):
64 requirements.append("parentdelta")
64 requirements.append("parentdelta")
65 else:
65 else:
66 raise error.RepoError(_("repository %s not found") % path)
66 raise error.RepoError(_("repository %s not found") % path)
67 elif create:
67 elif create:
68 raise error.RepoError(_("repository %s already exists") % path)
68 raise error.RepoError(_("repository %s already exists") % path)
69 else:
69 else:
70 # find requirements
70 # find requirements
71 requirements = set()
71 requirements = set()
72 try:
72 try:
73 requirements = set(self.opener("requires").read().splitlines())
73 requirements = set(self.opener("requires").read().splitlines())
74 except IOError, inst:
74 except IOError, inst:
75 if inst.errno != errno.ENOENT:
75 if inst.errno != errno.ENOENT:
76 raise
76 raise
77 for r in requirements - self.supported:
77 for r in requirements - self.supported:
78 raise error.RepoError(_("requirement '%s' not supported") % r)
78 raise error.RepoError(_("requirement '%s' not supported") % r)
79
79
80 self.sharedpath = self.path
80 self.sharedpath = self.path
81 try:
81 try:
82 s = os.path.realpath(self.opener("sharedpath").read())
82 s = os.path.realpath(self.opener("sharedpath").read())
83 if not os.path.exists(s):
83 if not os.path.exists(s):
84 raise error.RepoError(
84 raise error.RepoError(
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 self.sharedpath = s
86 self.sharedpath = s
87 except IOError, inst:
87 except IOError, inst:
88 if inst.errno != errno.ENOENT:
88 if inst.errno != errno.ENOENT:
89 raise
89 raise
90
90
91 self.store = store.store(requirements, self.sharedpath, util.opener)
91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 self.spath = self.store.path
92 self.spath = self.store.path
93 self.sopener = self.store.opener
93 self.sopener = self.store.opener
94 self.sjoin = self.store.join
94 self.sjoin = self.store.join
95 self.opener.createmode = self.store.createmode
95 self.opener.createmode = self.store.createmode
96 self._applyrequirements(requirements)
96 self._applyrequirements(requirements)
97 if create:
97 if create:
98 self._writerequirements()
98 self._writerequirements()
99
99
100 # These two define the set of tags for this repository. _tags
100 # These two define the set of tags for this repository. _tags
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 # 'local'. (Global tags are defined by .hgtags across all
102 # 'local'. (Global tags are defined by .hgtags across all
103 # heads, and local tags are defined in .hg/localtags.) They
103 # heads, and local tags are defined in .hg/localtags.) They
104 # constitute the in-memory cache of tags.
104 # constitute the in-memory cache of tags.
105 self._tags = None
105 self._tags = None
106 self._tagtypes = None
106 self._tagtypes = None
107
107
108 self._branchcache = None # in UTF-8
108 self._branchcache = None # in UTF-8
109 self._branchcachetip = None
109 self._branchcachetip = None
110 self.nodetagscache = None
110 self.nodetagscache = None
111 self.filterpats = {}
111 self.filterpats = {}
112 self._datafilters = {}
112 self._datafilters = {}
113 self._transref = self._lockref = self._wlockref = None
113 self._transref = self._lockref = self._wlockref = None
114
114
115 def _applyrequirements(self, requirements):
115 def _applyrequirements(self, requirements):
116 self.requirements = requirements
116 self.requirements = requirements
117 self.sopener.options = {}
117 self.sopener.options = {}
118 if 'parentdelta' in requirements:
118 if 'parentdelta' in requirements:
119 self.sopener.options['parentdelta'] = 1
119 self.sopener.options['parentdelta'] = 1
120
120
121 def _writerequirements(self):
121 def _writerequirements(self):
122 reqfile = self.opener("requires", "w")
122 reqfile = self.opener("requires", "w")
123 for r in self.requirements:
123 for r in self.requirements:
124 reqfile.write("%s\n" % r)
124 reqfile.write("%s\n" % r)
125 reqfile.close()
125 reqfile.close()
126
126
127 def _checknested(self, path):
127 def _checknested(self, path):
128 """Determine if path is a legal nested repository."""
128 """Determine if path is a legal nested repository."""
129 if not path.startswith(self.root):
129 if not path.startswith(self.root):
130 return False
130 return False
131 subpath = path[len(self.root) + 1:]
131 subpath = path[len(self.root) + 1:]
132
132
133 # XXX: Checking against the current working copy is wrong in
133 # XXX: Checking against the current working copy is wrong in
134 # the sense that it can reject things like
134 # the sense that it can reject things like
135 #
135 #
136 # $ hg cat -r 10 sub/x.txt
136 # $ hg cat -r 10 sub/x.txt
137 #
137 #
138 # if sub/ is no longer a subrepository in the working copy
138 # if sub/ is no longer a subrepository in the working copy
139 # parent revision.
139 # parent revision.
140 #
140 #
141 # However, it can of course also allow things that would have
141 # However, it can of course also allow things that would have
142 # been rejected before, such as the above cat command if sub/
142 # been rejected before, such as the above cat command if sub/
143 # is a subrepository now, but was a normal directory before.
143 # is a subrepository now, but was a normal directory before.
144 # The old path auditor would have rejected by mistake since it
144 # The old path auditor would have rejected by mistake since it
145 # panics when it sees sub/.hg/.
145 # panics when it sees sub/.hg/.
146 #
146 #
147 # All in all, checking against the working copy seems sensible
147 # All in all, checking against the working copy seems sensible
148 # since we want to prevent access to nested repositories on
148 # since we want to prevent access to nested repositories on
149 # the filesystem *now*.
149 # the filesystem *now*.
150 ctx = self[None]
150 ctx = self[None]
151 parts = util.splitpath(subpath)
151 parts = util.splitpath(subpath)
152 while parts:
152 while parts:
153 prefix = os.sep.join(parts)
153 prefix = os.sep.join(parts)
154 if prefix in ctx.substate:
154 if prefix in ctx.substate:
155 if prefix == subpath:
155 if prefix == subpath:
156 return True
156 return True
157 else:
157 else:
158 sub = ctx.sub(prefix)
158 sub = ctx.sub(prefix)
159 return sub.checknested(subpath[len(prefix) + 1:])
159 return sub.checknested(subpath[len(prefix) + 1:])
160 else:
160 else:
161 parts.pop()
161 parts.pop()
162 return False
162 return False
163
163
164
164
165 @propertycache
165 @propertycache
166 def changelog(self):
166 def changelog(self):
167 c = changelog.changelog(self.sopener)
167 c = changelog.changelog(self.sopener)
168 if 'HG_PENDING' in os.environ:
168 if 'HG_PENDING' in os.environ:
169 p = os.environ['HG_PENDING']
169 p = os.environ['HG_PENDING']
170 if p.startswith(self.root):
170 if p.startswith(self.root):
171 c.readpending('00changelog.i.a')
171 c.readpending('00changelog.i.a')
172 self.sopener.options['defversion'] = c.version
172 self.sopener.options['defversion'] = c.version
173 return c
173 return c
174
174
175 @propertycache
175 @propertycache
176 def manifest(self):
176 def manifest(self):
177 return manifest.manifest(self.sopener)
177 return manifest.manifest(self.sopener)
178
178
179 @propertycache
179 @propertycache
180 def dirstate(self):
180 def dirstate(self):
181 return dirstate.dirstate(self.opener, self.ui, self.root)
181 warned = [0]
182 def validate(node):
183 try:
184 r = self.changelog.rev(node)
185 return node
186 except error.LookupError:
187 if not warned[0]:
188 warned[0] = True
189 self.ui.warn(_("warning: ignoring unknown"
190 " working parent %s!\n" % short(node)))
191 return nullid
192
193 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
182
194
183 def __getitem__(self, changeid):
195 def __getitem__(self, changeid):
184 if changeid is None:
196 if changeid is None:
185 return context.workingctx(self)
197 return context.workingctx(self)
186 return context.changectx(self, changeid)
198 return context.changectx(self, changeid)
187
199
188 def __contains__(self, changeid):
200 def __contains__(self, changeid):
189 try:
201 try:
190 return bool(self.lookup(changeid))
202 return bool(self.lookup(changeid))
191 except error.RepoLookupError:
203 except error.RepoLookupError:
192 return False
204 return False
193
205
194 def __nonzero__(self):
206 def __nonzero__(self):
195 return True
207 return True
196
208
197 def __len__(self):
209 def __len__(self):
198 return len(self.changelog)
210 return len(self.changelog)
199
211
200 def __iter__(self):
212 def __iter__(self):
201 for i in xrange(len(self)):
213 for i in xrange(len(self)):
202 yield i
214 yield i
203
215
204 def url(self):
216 def url(self):
205 return 'file:' + self.root
217 return 'file:' + self.root
206
218
207 def hook(self, name, throw=False, **args):
219 def hook(self, name, throw=False, **args):
208 return hook.hook(self.ui, self, name, throw, **args)
220 return hook.hook(self.ui, self, name, throw, **args)
209
221
210 tag_disallowed = ':\r\n'
222 tag_disallowed = ':\r\n'
211
223
212 def _tag(self, names, node, message, local, user, date, extra={}):
224 def _tag(self, names, node, message, local, user, date, extra={}):
213 if isinstance(names, str):
225 if isinstance(names, str):
214 allchars = names
226 allchars = names
215 names = (names,)
227 names = (names,)
216 else:
228 else:
217 allchars = ''.join(names)
229 allchars = ''.join(names)
218 for c in self.tag_disallowed:
230 for c in self.tag_disallowed:
219 if c in allchars:
231 if c in allchars:
220 raise util.Abort(_('%r cannot be used in a tag name') % c)
232 raise util.Abort(_('%r cannot be used in a tag name') % c)
221
233
222 branches = self.branchmap()
234 branches = self.branchmap()
223 for name in names:
235 for name in names:
224 self.hook('pretag', throw=True, node=hex(node), tag=name,
236 self.hook('pretag', throw=True, node=hex(node), tag=name,
225 local=local)
237 local=local)
226 if name in branches:
238 if name in branches:
227 self.ui.warn(_("warning: tag %s conflicts with existing"
239 self.ui.warn(_("warning: tag %s conflicts with existing"
228 " branch name\n") % name)
240 " branch name\n") % name)
229
241
230 def writetags(fp, names, munge, prevtags):
242 def writetags(fp, names, munge, prevtags):
231 fp.seek(0, 2)
243 fp.seek(0, 2)
232 if prevtags and prevtags[-1] != '\n':
244 if prevtags and prevtags[-1] != '\n':
233 fp.write('\n')
245 fp.write('\n')
234 for name in names:
246 for name in names:
235 m = munge and munge(name) or name
247 m = munge and munge(name) or name
236 if self._tagtypes and name in self._tagtypes:
248 if self._tagtypes and name in self._tagtypes:
237 old = self._tags.get(name, nullid)
249 old = self._tags.get(name, nullid)
238 fp.write('%s %s\n' % (hex(old), m))
250 fp.write('%s %s\n' % (hex(old), m))
239 fp.write('%s %s\n' % (hex(node), m))
251 fp.write('%s %s\n' % (hex(node), m))
240 fp.close()
252 fp.close()
241
253
242 prevtags = ''
254 prevtags = ''
243 if local:
255 if local:
244 try:
256 try:
245 fp = self.opener('localtags', 'r+')
257 fp = self.opener('localtags', 'r+')
246 except IOError:
258 except IOError:
247 fp = self.opener('localtags', 'a')
259 fp = self.opener('localtags', 'a')
248 else:
260 else:
249 prevtags = fp.read()
261 prevtags = fp.read()
250
262
251 # local tags are stored in the current charset
263 # local tags are stored in the current charset
252 writetags(fp, names, None, prevtags)
264 writetags(fp, names, None, prevtags)
253 for name in names:
265 for name in names:
254 self.hook('tag', node=hex(node), tag=name, local=local)
266 self.hook('tag', node=hex(node), tag=name, local=local)
255 return
267 return
256
268
257 try:
269 try:
258 fp = self.wfile('.hgtags', 'rb+')
270 fp = self.wfile('.hgtags', 'rb+')
259 except IOError:
271 except IOError:
260 fp = self.wfile('.hgtags', 'ab')
272 fp = self.wfile('.hgtags', 'ab')
261 else:
273 else:
262 prevtags = fp.read()
274 prevtags = fp.read()
263
275
264 # committed tags are stored in UTF-8
276 # committed tags are stored in UTF-8
265 writetags(fp, names, encoding.fromlocal, prevtags)
277 writetags(fp, names, encoding.fromlocal, prevtags)
266
278
267 if '.hgtags' not in self.dirstate:
279 if '.hgtags' not in self.dirstate:
268 self[None].add(['.hgtags'])
280 self[None].add(['.hgtags'])
269
281
270 m = matchmod.exact(self.root, '', ['.hgtags'])
282 m = matchmod.exact(self.root, '', ['.hgtags'])
271 tagnode = self.commit(message, user, date, extra=extra, match=m)
283 tagnode = self.commit(message, user, date, extra=extra, match=m)
272
284
273 for name in names:
285 for name in names:
274 self.hook('tag', node=hex(node), tag=name, local=local)
286 self.hook('tag', node=hex(node), tag=name, local=local)
275
287
276 return tagnode
288 return tagnode
277
289
278 def tag(self, names, node, message, local, user, date):
290 def tag(self, names, node, message, local, user, date):
279 '''tag a revision with one or more symbolic names.
291 '''tag a revision with one or more symbolic names.
280
292
281 names is a list of strings or, when adding a single tag, names may be a
293 names is a list of strings or, when adding a single tag, names may be a
282 string.
294 string.
283
295
284 if local is True, the tags are stored in a per-repository file.
296 if local is True, the tags are stored in a per-repository file.
285 otherwise, they are stored in the .hgtags file, and a new
297 otherwise, they are stored in the .hgtags file, and a new
286 changeset is committed with the change.
298 changeset is committed with the change.
287
299
288 keyword arguments:
300 keyword arguments:
289
301
290 local: whether to store tags in non-version-controlled file
302 local: whether to store tags in non-version-controlled file
291 (default False)
303 (default False)
292
304
293 message: commit message to use if committing
305 message: commit message to use if committing
294
306
295 user: name of user to use if committing
307 user: name of user to use if committing
296
308
297 date: date tuple to use if committing'''
309 date: date tuple to use if committing'''
298
310
299 for x in self.status()[:5]:
311 for x in self.status()[:5]:
300 if '.hgtags' in x:
312 if '.hgtags' in x:
301 raise util.Abort(_('working copy of .hgtags is changed '
313 raise util.Abort(_('working copy of .hgtags is changed '
302 '(please commit .hgtags manually)'))
314 '(please commit .hgtags manually)'))
303
315
304 self.tags() # instantiate the cache
316 self.tags() # instantiate the cache
305 self._tag(names, node, message, local, user, date)
317 self._tag(names, node, message, local, user, date)
306
318
307 def tags(self):
319 def tags(self):
308 '''return a mapping of tag to node'''
320 '''return a mapping of tag to node'''
309 if self._tags is None:
321 if self._tags is None:
310 (self._tags, self._tagtypes) = self._findtags()
322 (self._tags, self._tagtypes) = self._findtags()
311
323
312 return self._tags
324 return self._tags
313
325
314 def _findtags(self):
326 def _findtags(self):
315 '''Do the hard work of finding tags. Return a pair of dicts
327 '''Do the hard work of finding tags. Return a pair of dicts
316 (tags, tagtypes) where tags maps tag name to node, and tagtypes
328 (tags, tagtypes) where tags maps tag name to node, and tagtypes
317 maps tag name to a string like \'global\' or \'local\'.
329 maps tag name to a string like \'global\' or \'local\'.
318 Subclasses or extensions are free to add their own tags, but
330 Subclasses or extensions are free to add their own tags, but
319 should be aware that the returned dicts will be retained for the
331 should be aware that the returned dicts will be retained for the
320 duration of the localrepo object.'''
332 duration of the localrepo object.'''
321
333
322 # XXX what tagtype should subclasses/extensions use? Currently
334 # XXX what tagtype should subclasses/extensions use? Currently
323 # mq and bookmarks add tags, but do not set the tagtype at all.
335 # mq and bookmarks add tags, but do not set the tagtype at all.
324 # Should each extension invent its own tag type? Should there
336 # Should each extension invent its own tag type? Should there
325 # be one tagtype for all such "virtual" tags? Or is the status
337 # be one tagtype for all such "virtual" tags? Or is the status
326 # quo fine?
338 # quo fine?
327
339
328 alltags = {} # map tag name to (node, hist)
340 alltags = {} # map tag name to (node, hist)
329 tagtypes = {}
341 tagtypes = {}
330
342
331 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
343 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
332 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
344 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
333
345
334 # Build the return dicts. Have to re-encode tag names because
346 # Build the return dicts. Have to re-encode tag names because
335 # the tags module always uses UTF-8 (in order not to lose info
347 # the tags module always uses UTF-8 (in order not to lose info
336 # writing to the cache), but the rest of Mercurial wants them in
348 # writing to the cache), but the rest of Mercurial wants them in
337 # local encoding.
349 # local encoding.
338 tags = {}
350 tags = {}
339 for (name, (node, hist)) in alltags.iteritems():
351 for (name, (node, hist)) in alltags.iteritems():
340 if node != nullid:
352 if node != nullid:
341 tags[encoding.tolocal(name)] = node
353 tags[encoding.tolocal(name)] = node
342 tags['tip'] = self.changelog.tip()
354 tags['tip'] = self.changelog.tip()
343 tagtypes = dict([(encoding.tolocal(name), value)
355 tagtypes = dict([(encoding.tolocal(name), value)
344 for (name, value) in tagtypes.iteritems()])
356 for (name, value) in tagtypes.iteritems()])
345 return (tags, tagtypes)
357 return (tags, tagtypes)
346
358
347 def tagtype(self, tagname):
359 def tagtype(self, tagname):
348 '''
360 '''
349 return the type of the given tag. result can be:
361 return the type of the given tag. result can be:
350
362
351 'local' : a local tag
363 'local' : a local tag
352 'global' : a global tag
364 'global' : a global tag
353 None : tag does not exist
365 None : tag does not exist
354 '''
366 '''
355
367
356 self.tags()
368 self.tags()
357
369
358 return self._tagtypes.get(tagname)
370 return self._tagtypes.get(tagname)
359
371
360 def tagslist(self):
372 def tagslist(self):
361 '''return a list of tags ordered by revision'''
373 '''return a list of tags ordered by revision'''
362 l = []
374 l = []
363 for t, n in self.tags().iteritems():
375 for t, n in self.tags().iteritems():
364 try:
376 try:
365 r = self.changelog.rev(n)
377 r = self.changelog.rev(n)
366 except:
378 except:
367 r = -2 # sort to the beginning of the list if unknown
379 r = -2 # sort to the beginning of the list if unknown
368 l.append((r, t, n))
380 l.append((r, t, n))
369 return [(t, n) for r, t, n in sorted(l)]
381 return [(t, n) for r, t, n in sorted(l)]
370
382
371 def nodetags(self, node):
383 def nodetags(self, node):
372 '''return the tags associated with a node'''
384 '''return the tags associated with a node'''
373 if not self.nodetagscache:
385 if not self.nodetagscache:
374 self.nodetagscache = {}
386 self.nodetagscache = {}
375 for t, n in self.tags().iteritems():
387 for t, n in self.tags().iteritems():
376 self.nodetagscache.setdefault(n, []).append(t)
388 self.nodetagscache.setdefault(n, []).append(t)
377 for tags in self.nodetagscache.itervalues():
389 for tags in self.nodetagscache.itervalues():
378 tags.sort()
390 tags.sort()
379 return self.nodetagscache.get(node, [])
391 return self.nodetagscache.get(node, [])
380
392
381 def _branchtags(self, partial, lrev):
393 def _branchtags(self, partial, lrev):
382 # TODO: rename this function?
394 # TODO: rename this function?
383 tiprev = len(self) - 1
395 tiprev = len(self) - 1
384 if lrev != tiprev:
396 if lrev != tiprev:
385 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
397 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
386 self._updatebranchcache(partial, ctxgen)
398 self._updatebranchcache(partial, ctxgen)
387 self._writebranchcache(partial, self.changelog.tip(), tiprev)
399 self._writebranchcache(partial, self.changelog.tip(), tiprev)
388
400
389 return partial
401 return partial
390
402
391 def updatebranchcache(self):
403 def updatebranchcache(self):
392 tip = self.changelog.tip()
404 tip = self.changelog.tip()
393 if self._branchcache is not None and self._branchcachetip == tip:
405 if self._branchcache is not None and self._branchcachetip == tip:
394 return self._branchcache
406 return self._branchcache
395
407
396 oldtip = self._branchcachetip
408 oldtip = self._branchcachetip
397 self._branchcachetip = tip
409 self._branchcachetip = tip
398 if oldtip is None or oldtip not in self.changelog.nodemap:
410 if oldtip is None or oldtip not in self.changelog.nodemap:
399 partial, last, lrev = self._readbranchcache()
411 partial, last, lrev = self._readbranchcache()
400 else:
412 else:
401 lrev = self.changelog.rev(oldtip)
413 lrev = self.changelog.rev(oldtip)
402 partial = self._branchcache
414 partial = self._branchcache
403
415
404 self._branchtags(partial, lrev)
416 self._branchtags(partial, lrev)
405 # this private cache holds all heads (not just tips)
417 # this private cache holds all heads (not just tips)
406 self._branchcache = partial
418 self._branchcache = partial
407
419
408 def branchmap(self):
420 def branchmap(self):
409 '''returns a dictionary {branch: [branchheads]}'''
421 '''returns a dictionary {branch: [branchheads]}'''
410 self.updatebranchcache()
422 self.updatebranchcache()
411 return self._branchcache
423 return self._branchcache
412
424
413 def branchtags(self):
425 def branchtags(self):
414 '''return a dict where branch names map to the tipmost head of
426 '''return a dict where branch names map to the tipmost head of
415 the branch, open heads come before closed'''
427 the branch, open heads come before closed'''
416 bt = {}
428 bt = {}
417 for bn, heads in self.branchmap().iteritems():
429 for bn, heads in self.branchmap().iteritems():
418 tip = heads[-1]
430 tip = heads[-1]
419 for h in reversed(heads):
431 for h in reversed(heads):
420 if 'close' not in self.changelog.read(h)[5]:
432 if 'close' not in self.changelog.read(h)[5]:
421 tip = h
433 tip = h
422 break
434 break
423 bt[bn] = tip
435 bt[bn] = tip
424 return bt
436 return bt
425
437
426
438
427 def _readbranchcache(self):
439 def _readbranchcache(self):
428 partial = {}
440 partial = {}
429 try:
441 try:
430 f = self.opener("branchheads.cache")
442 f = self.opener("branchheads.cache")
431 lines = f.read().split('\n')
443 lines = f.read().split('\n')
432 f.close()
444 f.close()
433 except (IOError, OSError):
445 except (IOError, OSError):
434 return {}, nullid, nullrev
446 return {}, nullid, nullrev
435
447
436 try:
448 try:
437 last, lrev = lines.pop(0).split(" ", 1)
449 last, lrev = lines.pop(0).split(" ", 1)
438 last, lrev = bin(last), int(lrev)
450 last, lrev = bin(last), int(lrev)
439 if lrev >= len(self) or self[lrev].node() != last:
451 if lrev >= len(self) or self[lrev].node() != last:
440 # invalidate the cache
452 # invalidate the cache
441 raise ValueError('invalidating branch cache (tip differs)')
453 raise ValueError('invalidating branch cache (tip differs)')
442 for l in lines:
454 for l in lines:
443 if not l:
455 if not l:
444 continue
456 continue
445 node, label = l.split(" ", 1)
457 node, label = l.split(" ", 1)
446 partial.setdefault(label.strip(), []).append(bin(node))
458 partial.setdefault(label.strip(), []).append(bin(node))
447 except KeyboardInterrupt:
459 except KeyboardInterrupt:
448 raise
460 raise
449 except Exception, inst:
461 except Exception, inst:
450 if self.ui.debugflag:
462 if self.ui.debugflag:
451 self.ui.warn(str(inst), '\n')
463 self.ui.warn(str(inst), '\n')
452 partial, last, lrev = {}, nullid, nullrev
464 partial, last, lrev = {}, nullid, nullrev
453 return partial, last, lrev
465 return partial, last, lrev
454
466
455 def _writebranchcache(self, branches, tip, tiprev):
467 def _writebranchcache(self, branches, tip, tiprev):
456 try:
468 try:
457 f = self.opener("branchheads.cache", "w", atomictemp=True)
469 f = self.opener("branchheads.cache", "w", atomictemp=True)
458 f.write("%s %s\n" % (hex(tip), tiprev))
470 f.write("%s %s\n" % (hex(tip), tiprev))
459 for label, nodes in branches.iteritems():
471 for label, nodes in branches.iteritems():
460 for node in nodes:
472 for node in nodes:
461 f.write("%s %s\n" % (hex(node), label))
473 f.write("%s %s\n" % (hex(node), label))
462 f.rename()
474 f.rename()
463 except (IOError, OSError):
475 except (IOError, OSError):
464 pass
476 pass
465
477
466 def _updatebranchcache(self, partial, ctxgen):
478 def _updatebranchcache(self, partial, ctxgen):
467 # collect new branch entries
479 # collect new branch entries
468 newbranches = {}
480 newbranches = {}
469 for c in ctxgen:
481 for c in ctxgen:
470 newbranches.setdefault(c.branch(), []).append(c.node())
482 newbranches.setdefault(c.branch(), []).append(c.node())
471 # if older branchheads are reachable from new ones, they aren't
483 # if older branchheads are reachable from new ones, they aren't
472 # really branchheads. Note checking parents is insufficient:
484 # really branchheads. Note checking parents is insufficient:
473 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
485 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
474 for branch, newnodes in newbranches.iteritems():
486 for branch, newnodes in newbranches.iteritems():
475 bheads = partial.setdefault(branch, [])
487 bheads = partial.setdefault(branch, [])
476 bheads.extend(newnodes)
488 bheads.extend(newnodes)
477 if len(bheads) <= 1:
489 if len(bheads) <= 1:
478 continue
490 continue
479 # starting from tip means fewer passes over reachable
491 # starting from tip means fewer passes over reachable
480 while newnodes:
492 while newnodes:
481 latest = newnodes.pop()
493 latest = newnodes.pop()
482 if latest not in bheads:
494 if latest not in bheads:
483 continue
495 continue
484 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
496 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
485 reachable = self.changelog.reachable(latest, minbhrev)
497 reachable = self.changelog.reachable(latest, minbhrev)
486 reachable.remove(latest)
498 reachable.remove(latest)
487 bheads = [b for b in bheads if b not in reachable]
499 bheads = [b for b in bheads if b not in reachable]
488 partial[branch] = bheads
500 partial[branch] = bheads
489
501
490 def lookup(self, key):
502 def lookup(self, key):
491 if isinstance(key, int):
503 if isinstance(key, int):
492 return self.changelog.node(key)
504 return self.changelog.node(key)
493 elif key == '.':
505 elif key == '.':
494 return self.dirstate.parents()[0]
506 return self.dirstate.parents()[0]
495 elif key == 'null':
507 elif key == 'null':
496 return nullid
508 return nullid
497 elif key == 'tip':
509 elif key == 'tip':
498 return self.changelog.tip()
510 return self.changelog.tip()
499 n = self.changelog._match(key)
511 n = self.changelog._match(key)
500 if n:
512 if n:
501 return n
513 return n
502 if key in self.tags():
514 if key in self.tags():
503 return self.tags()[key]
515 return self.tags()[key]
504 if key in self.branchtags():
516 if key in self.branchtags():
505 return self.branchtags()[key]
517 return self.branchtags()[key]
506 n = self.changelog._partialmatch(key)
518 n = self.changelog._partialmatch(key)
507 if n:
519 if n:
508 return n
520 return n
509
521
510 # can't find key, check if it might have come from damaged dirstate
522 # can't find key, check if it might have come from damaged dirstate
511 if key in self.dirstate.parents():
523 if key in self.dirstate.parents():
512 raise error.Abort(_("working directory has unknown parent '%s'!")
524 raise error.Abort(_("working directory has unknown parent '%s'!")
513 % short(key))
525 % short(key))
514 try:
526 try:
515 if len(key) == 20:
527 if len(key) == 20:
516 key = hex(key)
528 key = hex(key)
517 except:
529 except:
518 pass
530 pass
519 raise error.RepoLookupError(_("unknown revision '%s'") % key)
531 raise error.RepoLookupError(_("unknown revision '%s'") % key)
520
532
521 def lookupbranch(self, key, remote=None):
533 def lookupbranch(self, key, remote=None):
522 repo = remote or self
534 repo = remote or self
523 if key in repo.branchmap():
535 if key in repo.branchmap():
524 return key
536 return key
525
537
526 repo = (remote and remote.local()) and remote or self
538 repo = (remote and remote.local()) and remote or self
527 return repo[key].branch()
539 return repo[key].branch()
528
540
529 def local(self):
541 def local(self):
530 return True
542 return True
531
543
532 def join(self, f):
544 def join(self, f):
533 return os.path.join(self.path, f)
545 return os.path.join(self.path, f)
534
546
535 def wjoin(self, f):
547 def wjoin(self, f):
536 return os.path.join(self.root, f)
548 return os.path.join(self.root, f)
537
549
538 def file(self, f):
550 def file(self, f):
539 if f[0] == '/':
551 if f[0] == '/':
540 f = f[1:]
552 f = f[1:]
541 return filelog.filelog(self.sopener, f)
553 return filelog.filelog(self.sopener, f)
542
554
543 def changectx(self, changeid):
555 def changectx(self, changeid):
544 return self[changeid]
556 return self[changeid]
545
557
546 def parents(self, changeid=None):
558 def parents(self, changeid=None):
547 '''get list of changectxs for parents of changeid'''
559 '''get list of changectxs for parents of changeid'''
548 return self[changeid].parents()
560 return self[changeid].parents()
549
561
550 def filectx(self, path, changeid=None, fileid=None):
562 def filectx(self, path, changeid=None, fileid=None):
551 """changeid can be a changeset revision, node, or tag.
563 """changeid can be a changeset revision, node, or tag.
552 fileid can be a file revision or node."""
564 fileid can be a file revision or node."""
553 return context.filectx(self, path, changeid, fileid)
565 return context.filectx(self, path, changeid, fileid)
554
566
555 def getcwd(self):
567 def getcwd(self):
556 return self.dirstate.getcwd()
568 return self.dirstate.getcwd()
557
569
558 def pathto(self, f, cwd=None):
570 def pathto(self, f, cwd=None):
559 return self.dirstate.pathto(f, cwd)
571 return self.dirstate.pathto(f, cwd)
560
572
561 def wfile(self, f, mode='r'):
573 def wfile(self, f, mode='r'):
562 return self.wopener(f, mode)
574 return self.wopener(f, mode)
563
575
564 def _link(self, f):
576 def _link(self, f):
565 return os.path.islink(self.wjoin(f))
577 return os.path.islink(self.wjoin(f))
566
578
567 def _loadfilter(self, filter):
579 def _loadfilter(self, filter):
568 if filter not in self.filterpats:
580 if filter not in self.filterpats:
569 l = []
581 l = []
570 for pat, cmd in self.ui.configitems(filter):
582 for pat, cmd in self.ui.configitems(filter):
571 if cmd == '!':
583 if cmd == '!':
572 continue
584 continue
573 mf = matchmod.match(self.root, '', [pat])
585 mf = matchmod.match(self.root, '', [pat])
574 fn = None
586 fn = None
575 params = cmd
587 params = cmd
576 for name, filterfn in self._datafilters.iteritems():
588 for name, filterfn in self._datafilters.iteritems():
577 if cmd.startswith(name):
589 if cmd.startswith(name):
578 fn = filterfn
590 fn = filterfn
579 params = cmd[len(name):].lstrip()
591 params = cmd[len(name):].lstrip()
580 break
592 break
581 if not fn:
593 if not fn:
582 fn = lambda s, c, **kwargs: util.filter(s, c)
594 fn = lambda s, c, **kwargs: util.filter(s, c)
583 # Wrap old filters not supporting keyword arguments
595 # Wrap old filters not supporting keyword arguments
584 if not inspect.getargspec(fn)[2]:
596 if not inspect.getargspec(fn)[2]:
585 oldfn = fn
597 oldfn = fn
586 fn = lambda s, c, **kwargs: oldfn(s, c)
598 fn = lambda s, c, **kwargs: oldfn(s, c)
587 l.append((mf, fn, params))
599 l.append((mf, fn, params))
588 self.filterpats[filter] = l
600 self.filterpats[filter] = l
589 return self.filterpats[filter]
601 return self.filterpats[filter]
590
602
591 def _filter(self, filterpats, filename, data):
603 def _filter(self, filterpats, filename, data):
592 for mf, fn, cmd in filterpats:
604 for mf, fn, cmd in filterpats:
593 if mf(filename):
605 if mf(filename):
594 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
606 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
595 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
607 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
596 break
608 break
597
609
598 return data
610 return data
599
611
600 @propertycache
612 @propertycache
601 def _encodefilterpats(self):
613 def _encodefilterpats(self):
602 return self._loadfilter('encode')
614 return self._loadfilter('encode')
603
615
604 @propertycache
616 @propertycache
605 def _decodefilterpats(self):
617 def _decodefilterpats(self):
606 return self._loadfilter('decode')
618 return self._loadfilter('decode')
607
619
608 def adddatafilter(self, name, filter):
620 def adddatafilter(self, name, filter):
609 self._datafilters[name] = filter
621 self._datafilters[name] = filter
610
622
611 def wread(self, filename):
623 def wread(self, filename):
612 if self._link(filename):
624 if self._link(filename):
613 data = os.readlink(self.wjoin(filename))
625 data = os.readlink(self.wjoin(filename))
614 else:
626 else:
615 data = self.wopener(filename, 'r').read()
627 data = self.wopener(filename, 'r').read()
616 return self._filter(self._encodefilterpats, filename, data)
628 return self._filter(self._encodefilterpats, filename, data)
617
629
618 def wwrite(self, filename, data, flags):
630 def wwrite(self, filename, data, flags):
619 data = self._filter(self._decodefilterpats, filename, data)
631 data = self._filter(self._decodefilterpats, filename, data)
620 try:
632 try:
621 os.unlink(self.wjoin(filename))
633 os.unlink(self.wjoin(filename))
622 except OSError:
634 except OSError:
623 pass
635 pass
624 if 'l' in flags:
636 if 'l' in flags:
625 self.wopener.symlink(data, filename)
637 self.wopener.symlink(data, filename)
626 else:
638 else:
627 self.wopener(filename, 'w').write(data)
639 self.wopener(filename, 'w').write(data)
628 if 'x' in flags:
640 if 'x' in flags:
629 util.set_flags(self.wjoin(filename), False, True)
641 util.set_flags(self.wjoin(filename), False, True)
630
642
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

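    # A note on transaction() above: the weakref in self._transref lets a
    # caller that opens a transaction while one is already running get a
    # nested transaction via tr.nest() instead of a second journal.  On
    # successful close, aftertrans(renames) shifts the journal* files to
    # their undo* counterparts: recover() below replays an *interrupted*
    # transaction from the journal, while rollback() undoes the last
    # *completed* one from the undo files.
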
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                               int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def invalidatecaches(self):
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

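    # Lock-ordering sketch: callers that need both locks take the wlock
    # before the store lock and release in reverse order, as rollback()
    # above does, e.g.
    #
    #   wlock = repo.wlock()
    #   try:
    #       lock = repo.lock()
    #       try:
    #           pass # modify store and working-copy state here
    #       finally:
    #           lock.release()
    #   finally:
    #       wlock.release()
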
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

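    # Return-value summary for _filecommit() above: if the file content,
    # parents, or copy metadata changed, the new filenode from flog.add()
    # is returned and fname is appended to changelist; if only the exec or
    # link flags changed during a merge, fname is still recorded in
    # changelist but the existing fparent1 node is reused.
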
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

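    # Hook sequence for commit() above: "precommit" fires before anything
    # is written, "pretxncommit" fires inside commitctx() while the
    # transaction is still open (and may veto it), and "commit" fires
    # last, after the wlock has been released.
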
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

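    # A note on the changelog dance in commitctx() above: delayupdate()
    # buffers the new changelog entry so that writepending() can expose it
    # to the "pretxncommit" hook via the 'pending' callback without making
    # it permanent; finalize() then flushes it inside the transaction once
    # the hook has passed.
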
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r

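    # Usage sketch for status() above -- the seven lists unpack
    # positionally, in the same order r is built; lists not requested via
    # the keyword arguments come back empty:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, clean=True)
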
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

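    # A note on between() above: for each (top, bottom) pair it walks
    # first parents from top and samples nodes at exponentially growing
    # distances (1, 2, 4, 8, ... steps back), since the step counter i is
    # compared against a doubling f.  These sparse samples let the old
    # discovery protocol bisect for common ancestors without transferring
    # whole ancestor chains.
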
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            ret = discovery.prepush(self, remote, force, revs, newbranch)
            if ret[0] is None:
                # and here we return 0 for "nothing to push" or 1 for
                # "something to push but I refuse"
                return ret[1]

            cg, remote_heads = ret
            if unbundle:
                # local repo finds heads on server, finds out what revs it must
                # push.  once revs transferred, if server finds it has
                # different heads (someone else won commit/push race), server
                # aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                return remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
        finally:
            if lock is not None:
                lock.release()

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if mnfst.deltaparent(r) in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

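        # A note on collect_msng_filenodes() above: it prefers reading just
        # a manifest delta when the delta parent is one of the manifest's
        # own parents (the common case for linear history) and only falls
        # back to parsing the full manifest text otherwise, which keeps the
        # per-manifest cost proportional to what actually changed.
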
1456 # If we determine that a particular file or manifest node must be a
1468 # If we determine that a particular file or manifest node must be a
1457 # node that the recipient of the changegroup will already have, we can
1469 # node that the recipient of the changegroup will already have, we can
1458 # also assume the recipient will have all the parents. This function
1470 # also assume the recipient will have all the parents. This function
1459 # prunes them from the set of missing nodes.
1471 # prunes them from the set of missing nodes.
1460 def prune(revlog, missingnodes):
1472 def prune(revlog, missingnodes):
1461 hasset = set()
1473 hasset = set()
1462 # If a 'missing' filenode thinks it belongs to a changenode we
1474 # If a 'missing' filenode thinks it belongs to a changenode we
1463 # assume the recipient must have, then the recipient must have
1475 # assume the recipient must have, then the recipient must have
1464 # that filenode.
1476 # that filenode.
1465 for n in missingnodes:
1477 for n in missingnodes:
1466 clrev = revlog.linkrev(revlog.rev(n))
1478 clrev = revlog.linkrev(revlog.rev(n))
1467 if clrev in commonrevs:
1479 if clrev in commonrevs:
1468 hasset.add(n)
1480 hasset.add(n)
1469 for n in hasset:
1481 for n in hasset:
1470 missingnodes.pop(n, None)
1482 missingnodes.pop(n, None)
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling changes'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

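    # A hedged usage sketch, not part of the original module: the object
    # returned above is a bundle-like stream whose read() yields raw
    # changegroup bytes, so a caller could persist it roughly like this
    # ('bases', 'heads' and 'out' are hypothetical names):
    #
    #   cg = repo.changegroupsubset(bases, heads, 'bundle')
    #   while True:
    #       data = cg.read(4096)   # read successive changegroup chunks
    #       if not data:
    #           break
    #       out.write(data)
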
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

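    # A hedged illustration of the linkrev filtering gennodelst() relies on
    # (names below are hypothetical): each manifest or filelog revision
    # records the changelog revision that introduced it, so membership in the
    # outgoing set is a single linkrev lookup:
    #
    #   fl = repo.file('some/file')   # 'some/file' is a placeholder path
    #   clrev = fl.linkrev(0)         # changelog rev owning filelog rev 0
    #   wanted = clrev in revset      # the same test gennodelst() applies
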
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1 + added heads (2..n)
        - fewer heads than before: -1 - removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while 1:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


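    # A condensed, non-normative summary of the hook sequence that
    # addchangegroup() runs for a non-empty group, as read from the code
    # above:
    #
    #   prechangegroup     - throw=True, fired before any data is read
    #   pretxnchangegroup  - throw=True, pending changelog exposed via 'pending'
    #   changegroup        - fired once after the transaction closes
    #   incoming           - fired once per added changeset
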
    def stream_in(self, remote, requirements):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements + new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        self.invalidate()
        return len(self.heads()) + 1

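    # A hedged sketch of the wire format stream_in() parses above, derived
    # from the parsing code rather than a protocol spec:
    #
    #   "<resp>\n"                        e.g. "0\n" on success
    #   "<total_files> <total_bytes>\n"
    #   then, for each file:
    #   "<name>\0<size>\n" followed by exactly <size> bytes of revlog data
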
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

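    # Hedged usage sketch (h1/h2 are hypothetical head nodes): requesting
    # specific heads always falls back to pull, while a bare streaming
    # request may negotiate stream_in() as above:
    #
    #   repo.clone(remote, stream=True)       # streamed if formats allow
    #   repo.clone(remote, heads=[h1, h2])    # forces the pull path
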
    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

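    # A hedged example, assuming the bookmarks extension is enabled, which
    # registers a 'bookmarks' pushkey namespace ('stable' and 'newhex' are
    # hypothetical values):
    #
    #   marks = repo.listkeys('bookmarks')    # {bookmark name: hex node}
    #   repo.pushkey('bookmarks', 'stable', marks.get('stable', ''), newhex)
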
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

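# A hedged illustration of how aftertrans() is meant to be used (the paths
# and constructor arguments below are assumptions, not taken from this file):
# the closure captures only plain tuples, never the repository object, so
# handing it to a transaction keeps no reference cycle alive:
#
#   renames = [(repo.sjoin('journal'), repo.sjoin('undo'))]
#   tr = transaction.transaction(ui.warn, repo.sopener,
#                                repo.sjoin('journal'), aftertrans(renames))
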
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
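
# Hedged note: instance() and islocal() are the small module-level interface
# that mercurial.hg uses when resolving a repository path to an
# implementation, e.g. (illustrative only; the path is a placeholder):
#
#   from mercurial import hg, ui
#   repo = hg.repository(ui.ui(), '/path/to/repo')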