bookmarks: do not forward merged bookmark (issue1877)
David Soria Parra
r14498:4d958d1b stable
@@ -0,0 +1,45 @@
http://mercurial.selenic.com/bts/issue1877

  $ hg init a
  $ cd a
  $ echo a > a
  $ hg add a
  $ hg ci -m 'a'
  $ echo b > a
  $ hg ci -m'b'
  $ hg up 0
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg book main
  $ hg book
   * main                      0:cb9a9f314b8b
  $ echo c > c
  $ hg add c
  $ hg ci -m'c'
  created new head
  $ hg book
   * main                      2:d36c0562f908
  $ hg heads
  changeset:   2:d36c0562f908
  bookmark:    main
  tag:         tip
  parent:      0:cb9a9f314b8b
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     c

  changeset:   1:1e6c11564562
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     b

  $ hg up 1e6c11564562
  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ hg merge main
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ hg book
     main                      2:d36c0562f908
  $ hg ci -m'merge'
  $ hg book
     main                      2:d36c0562f908
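The test above pins down the intended behaviour: `main` was merged into an
unbookmarked head, so committing the merge must leave `main` on d36c0562f908
rather than forwarding it to the merge changeset. A rough stand-alone sketch
of that rule (hypothetical helper names for illustration only, not the actual
patch to Mercurial):

  # On commit, only the active bookmark may move, and only when it sits on
  # a parent of the new changeset; a bookmark that was merely merged in,
  # like 'main' above, stays where it was.
  def updatebookmarks(marks, active, parents, newnode):
      updated = dict(marks)
      if active is not None and updated.get(active) in parents:
          updated[active] = newnode    # active bookmark follows the commit
      return updated                   # merged-in bookmarks are untouched

  # mirrors the test: 'main' is not active when the merge is committed
  marks = {'main': 'd36c0562f908'}
  after = updatebookmarks(marks, None,
                          ['1e6c11564562', 'd36c0562f908'], 'ffffffffffff')
  assert after['main'] == 'd36c0562f908'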
@@ -1,2049 +1,2046 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared',
25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 'dotencode'))
26 'dotencode'))
27
27
28 def __init__(self, baseui, path=None, create=0):
28 def __init__(self, baseui, path=None, create=0):
29 repo.repository.__init__(self)
29 repo.repository.__init__(self)
30 self.root = os.path.realpath(util.expandpath(path))
30 self.root = os.path.realpath(util.expandpath(path))
31 self.path = os.path.join(self.root, ".hg")
31 self.path = os.path.join(self.root, ".hg")
32 self.origroot = path
32 self.origroot = path
33 self.auditor = util.path_auditor(self.root, self._checknested)
33 self.auditor = util.path_auditor(self.root, self._checknested)
34 self.opener = util.opener(self.path)
34 self.opener = util.opener(self.path)
35 self.wopener = util.opener(self.root)
35 self.wopener = util.opener(self.root)
36 self.baseui = baseui
36 self.baseui = baseui
37 self.ui = baseui.copy()
37 self.ui = baseui.copy()
38
38
39 try:
39 try:
40 self.ui.readconfig(self.join("hgrc"), self.root)
40 self.ui.readconfig(self.join("hgrc"), self.root)
41 extensions.loadall(self.ui)
41 extensions.loadall(self.ui)
42 except IOError:
42 except IOError:
43 pass
43 pass
44
44
45 if not os.path.isdir(self.path):
45 if not os.path.isdir(self.path):
46 if create:
46 if create:
47 if not os.path.exists(path):
47 if not os.path.exists(path):
48 util.makedirs(path)
48 util.makedirs(path)
49 os.mkdir(self.path)
49 os.mkdir(self.path)
50 requirements = ["revlogv1"]
50 requirements = ["revlogv1"]
51 if self.ui.configbool('format', 'usestore', True):
51 if self.ui.configbool('format', 'usestore', True):
52 os.mkdir(os.path.join(self.path, "store"))
52 os.mkdir(os.path.join(self.path, "store"))
53 requirements.append("store")
53 requirements.append("store")
54 if self.ui.configbool('format', 'usefncache', True):
54 if self.ui.configbool('format', 'usefncache', True):
55 requirements.append("fncache")
55 requirements.append("fncache")
56 if self.ui.configbool('format', 'dotencode', True):
56 if self.ui.configbool('format', 'dotencode', True):
57 requirements.append('dotencode')
57 requirements.append('dotencode')
58 # create an invalid changelog
58 # create an invalid changelog
59 self.opener("00changelog.i", "a").write(
59 self.opener("00changelog.i", "a").write(
60 '\0\0\0\2' # represents revlogv2
60 '\0\0\0\2' # represents revlogv2
61 ' dummy changelog to prevent using the old repo layout'
61 ' dummy changelog to prevent using the old repo layout'
62 )
62 )
63 if self.ui.configbool('format', 'parentdelta', False):
63 if self.ui.configbool('format', 'parentdelta', False):
64 requirements.append("parentdelta")
64 requirements.append("parentdelta")
65 else:
65 else:
66 raise error.RepoError(_("repository %s not found") % path)
66 raise error.RepoError(_("repository %s not found") % path)
67 elif create:
67 elif create:
68 raise error.RepoError(_("repository %s already exists") % path)
68 raise error.RepoError(_("repository %s already exists") % path)
69 else:
69 else:
70 # find requirements
70 # find requirements
71 requirements = set()
71 requirements = set()
72 try:
72 try:
73 requirements = set(self.opener("requires").read().splitlines())
73 requirements = set(self.opener("requires").read().splitlines())
74 except IOError, inst:
74 except IOError, inst:
75 if inst.errno != errno.ENOENT:
75 if inst.errno != errno.ENOENT:
76 raise
76 raise
77 for r in requirements - self.supported:
77 for r in requirements - self.supported:
78 raise error.RequirementError(
78 raise error.RequirementError(
79 _("requirement '%s' not supported") % r)
79 _("requirement '%s' not supported") % r)
80
80
81 self.sharedpath = self.path
81 self.sharedpath = self.path
82 try:
82 try:
83 s = os.path.realpath(self.opener("sharedpath").read())
83 s = os.path.realpath(self.opener("sharedpath").read())
84 if not os.path.exists(s):
84 if not os.path.exists(s):
85 raise error.RepoError(
85 raise error.RepoError(
86 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 _('.hg/sharedpath points to nonexistent directory %s') % s)
87 self.sharedpath = s
87 self.sharedpath = s
88 except IOError, inst:
88 except IOError, inst:
89 if inst.errno != errno.ENOENT:
89 if inst.errno != errno.ENOENT:
90 raise
90 raise
91
91
92 self.store = store.store(requirements, self.sharedpath, util.opener)
92 self.store = store.store(requirements, self.sharedpath, util.opener)
93 self.spath = self.store.path
93 self.spath = self.store.path
94 self.sopener = self.store.opener
94 self.sopener = self.store.opener
95 self.sjoin = self.store.join
95 self.sjoin = self.store.join
96 self.opener.createmode = self.store.createmode
96 self.opener.createmode = self.store.createmode
97 self._applyrequirements(requirements)
97 self._applyrequirements(requirements)
98 if create:
98 if create:
99 self._writerequirements()
99 self._writerequirements()
100
100
101 # These two define the set of tags for this repository. _tags
101 # These two define the set of tags for this repository. _tags
102 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 # maps tag name to node; _tagtypes maps tag name to 'global' or
103 # 'local'. (Global tags are defined by .hgtags across all
103 # 'local'. (Global tags are defined by .hgtags across all
104 # heads, and local tags are defined in .hg/localtags.) They
104 # heads, and local tags are defined in .hg/localtags.) They
105 # constitute the in-memory cache of tags.
105 # constitute the in-memory cache of tags.
106 self._tags = None
106 self._tags = None
107 self._tagtypes = None
107 self._tagtypes = None
108
108
109 self._branchcache = None
109 self._branchcache = None
110 self._branchcachetip = None
110 self._branchcachetip = None
111 self.nodetagscache = None
111 self.nodetagscache = None
112 self.filterpats = {}
112 self.filterpats = {}
113 self._datafilters = {}
113 self._datafilters = {}
114 self._transref = self._lockref = self._wlockref = None
114 self._transref = self._lockref = self._wlockref = None
115
115
116 def _applyrequirements(self, requirements):
116 def _applyrequirements(self, requirements):
117 self.requirements = requirements
117 self.requirements = requirements
118 self.sopener.options = {}
118 self.sopener.options = {}
119 if 'parentdelta' in requirements:
119 if 'parentdelta' in requirements:
120 self.sopener.options['parentdelta'] = 1
120 self.sopener.options['parentdelta'] = 1
121
121
122 def _writerequirements(self):
122 def _writerequirements(self):
123 reqfile = self.opener("requires", "w")
123 reqfile = self.opener("requires", "w")
124 for r in self.requirements:
124 for r in self.requirements:
125 reqfile.write("%s\n" % r)
125 reqfile.write("%s\n" % r)
126 reqfile.close()
126 reqfile.close()
127
127
128 def _checknested(self, path):
128 def _checknested(self, path):
129 """Determine if path is a legal nested repository."""
129 """Determine if path is a legal nested repository."""
130 if not path.startswith(self.root):
130 if not path.startswith(self.root):
131 return False
131 return False
132 subpath = path[len(self.root) + 1:]
132 subpath = path[len(self.root) + 1:]
133
133
134 # XXX: Checking against the current working copy is wrong in
134 # XXX: Checking against the current working copy is wrong in
135 # the sense that it can reject things like
135 # the sense that it can reject things like
136 #
136 #
137 # $ hg cat -r 10 sub/x.txt
137 # $ hg cat -r 10 sub/x.txt
138 #
138 #
139 # if sub/ is no longer a subrepository in the working copy
139 # if sub/ is no longer a subrepository in the working copy
140 # parent revision.
140 # parent revision.
141 #
141 #
142 # However, it can of course also allow things that would have
142 # However, it can of course also allow things that would have
143 # been rejected before, such as the above cat command if sub/
143 # been rejected before, such as the above cat command if sub/
144 # is a subrepository now, but was a normal directory before.
144 # is a subrepository now, but was a normal directory before.
145 # The old path auditor would have rejected by mistake since it
145 # The old path auditor would have rejected by mistake since it
146 # panics when it sees sub/.hg/.
146 # panics when it sees sub/.hg/.
147 #
147 #
148 # All in all, checking against the working copy seems sensible
148 # All in all, checking against the working copy seems sensible
149 # since we want to prevent access to nested repositories on
149 # since we want to prevent access to nested repositories on
150 # the filesystem *now*.
150 # the filesystem *now*.
151 ctx = self[None]
151 ctx = self[None]
152 parts = util.splitpath(subpath)
152 parts = util.splitpath(subpath)
153 while parts:
153 while parts:
154 prefix = os.sep.join(parts)
154 prefix = os.sep.join(parts)
155 if prefix in ctx.substate:
155 if prefix in ctx.substate:
156 if prefix == subpath:
156 if prefix == subpath:
157 return True
157 return True
158 else:
158 else:
159 sub = ctx.sub(prefix)
159 sub = ctx.sub(prefix)
160 return sub.checknested(subpath[len(prefix) + 1:])
160 return sub.checknested(subpath[len(prefix) + 1:])
161 else:
161 else:
162 parts.pop()
162 parts.pop()
163 return False
163 return False
164
164
165 @util.propertycache
165 @util.propertycache
166 def _bookmarks(self):
166 def _bookmarks(self):
167 return bookmarks.read(self)
167 return bookmarks.read(self)
168
168
169 @util.propertycache
169 @util.propertycache
170 def _bookmarkcurrent(self):
170 def _bookmarkcurrent(self):
171 return bookmarks.readcurrent(self)
171 return bookmarks.readcurrent(self)
172
172
173 @propertycache
173 @propertycache
174 def changelog(self):
174 def changelog(self):
175 c = changelog.changelog(self.sopener)
175 c = changelog.changelog(self.sopener)
176 if 'HG_PENDING' in os.environ:
176 if 'HG_PENDING' in os.environ:
177 p = os.environ['HG_PENDING']
177 p = os.environ['HG_PENDING']
178 if p.startswith(self.root):
178 if p.startswith(self.root):
179 c.readpending('00changelog.i.a')
179 c.readpending('00changelog.i.a')
180 self.sopener.options['defversion'] = c.version
180 self.sopener.options['defversion'] = c.version
181 return c
181 return c
182
182
183 @propertycache
183 @propertycache
184 def manifest(self):
184 def manifest(self):
185 return manifest.manifest(self.sopener)
185 return manifest.manifest(self.sopener)
186
186
187 @propertycache
187 @propertycache
188 def dirstate(self):
188 def dirstate(self):
189 warned = [0]
189 warned = [0]
190 def validate(node):
190 def validate(node):
191 try:
191 try:
192 r = self.changelog.rev(node)
192 r = self.changelog.rev(node)
193 return node
193 return node
194 except error.LookupError:
194 except error.LookupError:
195 if not warned[0]:
195 if not warned[0]:
196 warned[0] = True
196 warned[0] = True
197 self.ui.warn(_("warning: ignoring unknown"
197 self.ui.warn(_("warning: ignoring unknown"
198 " working parent %s!\n") % short(node))
198 " working parent %s!\n") % short(node))
199 return nullid
199 return nullid
200
200
201 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
201 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
202
202
203 def __getitem__(self, changeid):
203 def __getitem__(self, changeid):
204 if changeid is None:
204 if changeid is None:
205 return context.workingctx(self)
205 return context.workingctx(self)
206 return context.changectx(self, changeid)
206 return context.changectx(self, changeid)
207
207
208 def __contains__(self, changeid):
208 def __contains__(self, changeid):
209 try:
209 try:
210 return bool(self.lookup(changeid))
210 return bool(self.lookup(changeid))
211 except error.RepoLookupError:
211 except error.RepoLookupError:
212 return False
212 return False
213
213
214 def __nonzero__(self):
214 def __nonzero__(self):
215 return True
215 return True
216
216
217 def __len__(self):
217 def __len__(self):
218 return len(self.changelog)
218 return len(self.changelog)
219
219
220 def __iter__(self):
220 def __iter__(self):
221 for i in xrange(len(self)):
221 for i in xrange(len(self)):
222 yield i
222 yield i
223
223
224 def url(self):
224 def url(self):
225 return 'file:' + self.root
225 return 'file:' + self.root
226
226
227 def hook(self, name, throw=False, **args):
227 def hook(self, name, throw=False, **args):
228 return hook.hook(self.ui, self, name, throw, **args)
228 return hook.hook(self.ui, self, name, throw, **args)
229
229
230 tag_disallowed = ':\r\n'
230 tag_disallowed = ':\r\n'
231
231
232 def _tag(self, names, node, message, local, user, date, extra={}):
232 def _tag(self, names, node, message, local, user, date, extra={}):
233 if isinstance(names, str):
233 if isinstance(names, str):
234 allchars = names
234 allchars = names
235 names = (names,)
235 names = (names,)
236 else:
236 else:
237 allchars = ''.join(names)
237 allchars = ''.join(names)
238 for c in self.tag_disallowed:
238 for c in self.tag_disallowed:
239 if c in allchars:
239 if c in allchars:
240 raise util.Abort(_('%r cannot be used in a tag name') % c)
240 raise util.Abort(_('%r cannot be used in a tag name') % c)
241
241
242 branches = self.branchmap()
242 branches = self.branchmap()
243 for name in names:
243 for name in names:
244 self.hook('pretag', throw=True, node=hex(node), tag=name,
244 self.hook('pretag', throw=True, node=hex(node), tag=name,
245 local=local)
245 local=local)
246 if name in branches:
246 if name in branches:
247 self.ui.warn(_("warning: tag %s conflicts with existing"
247 self.ui.warn(_("warning: tag %s conflicts with existing"
248 " branch name\n") % name)
248 " branch name\n") % name)
249
249
250 def writetags(fp, names, munge, prevtags):
250 def writetags(fp, names, munge, prevtags):
251 fp.seek(0, 2)
251 fp.seek(0, 2)
252 if prevtags and prevtags[-1] != '\n':
252 if prevtags and prevtags[-1] != '\n':
253 fp.write('\n')
253 fp.write('\n')
254 for name in names:
254 for name in names:
255 m = munge and munge(name) or name
255 m = munge and munge(name) or name
256 if self._tagtypes and name in self._tagtypes:
256 if self._tagtypes and name in self._tagtypes:
257 old = self._tags.get(name, nullid)
257 old = self._tags.get(name, nullid)
258 fp.write('%s %s\n' % (hex(old), m))
258 fp.write('%s %s\n' % (hex(old), m))
259 fp.write('%s %s\n' % (hex(node), m))
259 fp.write('%s %s\n' % (hex(node), m))
260 fp.close()
260 fp.close()
261
261
262 prevtags = ''
262 prevtags = ''
263 if local:
263 if local:
264 try:
264 try:
265 fp = self.opener('localtags', 'r+')
265 fp = self.opener('localtags', 'r+')
266 except IOError:
266 except IOError:
267 fp = self.opener('localtags', 'a')
267 fp = self.opener('localtags', 'a')
268 else:
268 else:
269 prevtags = fp.read()
269 prevtags = fp.read()
270
270
271 # local tags are stored in the current charset
271 # local tags are stored in the current charset
272 writetags(fp, names, None, prevtags)
272 writetags(fp, names, None, prevtags)
273 for name in names:
273 for name in names:
274 self.hook('tag', node=hex(node), tag=name, local=local)
274 self.hook('tag', node=hex(node), tag=name, local=local)
275 return
275 return
276
276
277 try:
277 try:
278 fp = self.wfile('.hgtags', 'rb+')
278 fp = self.wfile('.hgtags', 'rb+')
279 except IOError:
279 except IOError:
280 fp = self.wfile('.hgtags', 'ab')
280 fp = self.wfile('.hgtags', 'ab')
281 else:
281 else:
282 prevtags = fp.read()
282 prevtags = fp.read()
283
283
284 # committed tags are stored in UTF-8
284 # committed tags are stored in UTF-8
285 writetags(fp, names, encoding.fromlocal, prevtags)
285 writetags(fp, names, encoding.fromlocal, prevtags)
286
286
287 fp.close()
287 fp.close()
288
288
289 if '.hgtags' not in self.dirstate:
289 if '.hgtags' not in self.dirstate:
290 self[None].add(['.hgtags'])
290 self[None].add(['.hgtags'])
291
291
292 m = matchmod.exact(self.root, '', ['.hgtags'])
292 m = matchmod.exact(self.root, '', ['.hgtags'])
293 tagnode = self.commit(message, user, date, extra=extra, match=m)
293 tagnode = self.commit(message, user, date, extra=extra, match=m)
294
294
295 for name in names:
295 for name in names:
296 self.hook('tag', node=hex(node), tag=name, local=local)
296 self.hook('tag', node=hex(node), tag=name, local=local)
297
297
298 return tagnode
298 return tagnode
299
299
300 def tag(self, names, node, message, local, user, date):
300 def tag(self, names, node, message, local, user, date):
301 '''tag a revision with one or more symbolic names.
301 '''tag a revision with one or more symbolic names.
302
302
303 names is a list of strings or, when adding a single tag, names may be a
303 names is a list of strings or, when adding a single tag, names may be a
304 string.
304 string.
305
305
306 if local is True, the tags are stored in a per-repository file.
306 if local is True, the tags are stored in a per-repository file.
307 otherwise, they are stored in the .hgtags file, and a new
307 otherwise, they are stored in the .hgtags file, and a new
308 changeset is committed with the change.
308 changeset is committed with the change.
309
309
310 keyword arguments:
310 keyword arguments:
311
311
312 local: whether to store tags in non-version-controlled file
312 local: whether to store tags in non-version-controlled file
313 (default False)
313 (default False)
314
314
315 message: commit message to use if committing
315 message: commit message to use if committing
316
316
317 user: name of user to use if committing
317 user: name of user to use if committing
318
318
319 date: date tuple to use if committing'''
319 date: date tuple to use if committing'''
320
320
321 if not local:
321 if not local:
322 for x in self.status()[:5]:
322 for x in self.status()[:5]:
323 if '.hgtags' in x:
323 if '.hgtags' in x:
324 raise util.Abort(_('working copy of .hgtags is changed '
324 raise util.Abort(_('working copy of .hgtags is changed '
325 '(please commit .hgtags manually)'))
325 '(please commit .hgtags manually)'))
326
326
327 self.tags() # instantiate the cache
327 self.tags() # instantiate the cache
328 self._tag(names, node, message, local, user, date)
328 self._tag(names, node, message, local, user, date)
329
329
330 def tags(self):
330 def tags(self):
331 '''return a mapping of tag to node'''
331 '''return a mapping of tag to node'''
332 if self._tags is None:
332 if self._tags is None:
333 (self._tags, self._tagtypes) = self._findtags()
333 (self._tags, self._tagtypes) = self._findtags()
334
334
335 return self._tags
335 return self._tags
336
336
337 def _findtags(self):
337 def _findtags(self):
338 '''Do the hard work of finding tags. Return a pair of dicts
338 '''Do the hard work of finding tags. Return a pair of dicts
339 (tags, tagtypes) where tags maps tag name to node, and tagtypes
339 (tags, tagtypes) where tags maps tag name to node, and tagtypes
340 maps tag name to a string like \'global\' or \'local\'.
340 maps tag name to a string like \'global\' or \'local\'.
341 Subclasses or extensions are free to add their own tags, but
341 Subclasses or extensions are free to add their own tags, but
342 should be aware that the returned dicts will be retained for the
342 should be aware that the returned dicts will be retained for the
343 duration of the localrepo object.'''
343 duration of the localrepo object.'''
344
344
345 # XXX what tagtype should subclasses/extensions use? Currently
345 # XXX what tagtype should subclasses/extensions use? Currently
346 # mq and bookmarks add tags, but do not set the tagtype at all.
346 # mq and bookmarks add tags, but do not set the tagtype at all.
347 # Should each extension invent its own tag type? Should there
347 # Should each extension invent its own tag type? Should there
348 # be one tagtype for all such "virtual" tags? Or is the status
348 # be one tagtype for all such "virtual" tags? Or is the status
349 # quo fine?
349 # quo fine?
350
350
351 alltags = {} # map tag name to (node, hist)
351 alltags = {} # map tag name to (node, hist)
352 tagtypes = {}
352 tagtypes = {}
353
353
354 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
354 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
355 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
355 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
356
356
357 # Build the return dicts. Have to re-encode tag names because
357 # Build the return dicts. Have to re-encode tag names because
358 # the tags module always uses UTF-8 (in order not to lose info
358 # the tags module always uses UTF-8 (in order not to lose info
359 # writing to the cache), but the rest of Mercurial wants them in
359 # writing to the cache), but the rest of Mercurial wants them in
360 # local encoding.
360 # local encoding.
361 tags = {}
361 tags = {}
362 for (name, (node, hist)) in alltags.iteritems():
362 for (name, (node, hist)) in alltags.iteritems():
363 if node != nullid:
363 if node != nullid:
364 tags[encoding.tolocal(name)] = node
364 tags[encoding.tolocal(name)] = node
365 tags['tip'] = self.changelog.tip()
365 tags['tip'] = self.changelog.tip()
366 tagtypes = dict([(encoding.tolocal(name), value)
366 tagtypes = dict([(encoding.tolocal(name), value)
367 for (name, value) in tagtypes.iteritems()])
367 for (name, value) in tagtypes.iteritems()])
368 return (tags, tagtypes)
368 return (tags, tagtypes)
369
369
370 def tagtype(self, tagname):
370 def tagtype(self, tagname):
371 '''
371 '''
372 return the type of the given tag. result can be:
372 return the type of the given tag. result can be:
373
373
374 'local' : a local tag
374 'local' : a local tag
375 'global' : a global tag
375 'global' : a global tag
376 None : tag does not exist
376 None : tag does not exist
377 '''
377 '''
378
378
379 self.tags()
379 self.tags()
380
380
381 return self._tagtypes.get(tagname)
381 return self._tagtypes.get(tagname)
382
382
383 def tagslist(self):
383 def tagslist(self):
384 '''return a list of tags ordered by revision'''
384 '''return a list of tags ordered by revision'''
385 l = []
385 l = []
386 for t, n in self.tags().iteritems():
386 for t, n in self.tags().iteritems():
387 try:
387 try:
388 r = self.changelog.rev(n)
388 r = self.changelog.rev(n)
389 except:
389 except:
390 r = -2 # sort to the beginning of the list if unknown
390 r = -2 # sort to the beginning of the list if unknown
391 l.append((r, t, n))
391 l.append((r, t, n))
392 return [(t, n) for r, t, n in sorted(l)]
392 return [(t, n) for r, t, n in sorted(l)]
393
393
394 def nodetags(self, node):
394 def nodetags(self, node):
395 '''return the tags associated with a node'''
395 '''return the tags associated with a node'''
396 if not self.nodetagscache:
396 if not self.nodetagscache:
397 self.nodetagscache = {}
397 self.nodetagscache = {}
398 for t, n in self.tags().iteritems():
398 for t, n in self.tags().iteritems():
399 self.nodetagscache.setdefault(n, []).append(t)
399 self.nodetagscache.setdefault(n, []).append(t)
400 for tags in self.nodetagscache.itervalues():
400 for tags in self.nodetagscache.itervalues():
401 tags.sort()
401 tags.sort()
402 return self.nodetagscache.get(node, [])
402 return self.nodetagscache.get(node, [])
403
403
404 def nodebookmarks(self, node):
404 def nodebookmarks(self, node):
405 marks = []
405 marks = []
406 for bookmark, n in self._bookmarks.iteritems():
406 for bookmark, n in self._bookmarks.iteritems():
407 if n == node:
407 if n == node:
408 marks.append(bookmark)
408 marks.append(bookmark)
409 return sorted(marks)
409 return sorted(marks)
410
410
411 def _branchtags(self, partial, lrev):
411 def _branchtags(self, partial, lrev):
412 # TODO: rename this function?
412 # TODO: rename this function?
413 tiprev = len(self) - 1
413 tiprev = len(self) - 1
414 if lrev != tiprev:
414 if lrev != tiprev:
415 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
415 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
416 self._updatebranchcache(partial, ctxgen)
416 self._updatebranchcache(partial, ctxgen)
417 self._writebranchcache(partial, self.changelog.tip(), tiprev)
417 self._writebranchcache(partial, self.changelog.tip(), tiprev)
418
418
419 return partial
419 return partial
420
420
421 def updatebranchcache(self):
421 def updatebranchcache(self):
422 tip = self.changelog.tip()
422 tip = self.changelog.tip()
423 if self._branchcache is not None and self._branchcachetip == tip:
423 if self._branchcache is not None and self._branchcachetip == tip:
424 return self._branchcache
424 return self._branchcache
425
425
426 oldtip = self._branchcachetip
426 oldtip = self._branchcachetip
427 self._branchcachetip = tip
427 self._branchcachetip = tip
428 if oldtip is None or oldtip not in self.changelog.nodemap:
428 if oldtip is None or oldtip not in self.changelog.nodemap:
429 partial, last, lrev = self._readbranchcache()
429 partial, last, lrev = self._readbranchcache()
430 else:
430 else:
431 lrev = self.changelog.rev(oldtip)
431 lrev = self.changelog.rev(oldtip)
432 partial = self._branchcache
432 partial = self._branchcache
433
433
434 self._branchtags(partial, lrev)
434 self._branchtags(partial, lrev)
435 # this private cache holds all heads (not just tips)
435 # this private cache holds all heads (not just tips)
436 self._branchcache = partial
436 self._branchcache = partial
437
437
438 def branchmap(self):
438 def branchmap(self):
439 '''returns a dictionary {branch: [branchheads]}'''
439 '''returns a dictionary {branch: [branchheads]}'''
440 self.updatebranchcache()
440 self.updatebranchcache()
441 return self._branchcache
441 return self._branchcache
442
442
443 def branchtags(self):
443 def branchtags(self):
444 '''return a dict where branch names map to the tipmost head of
444 '''return a dict where branch names map to the tipmost head of
445 the branch, open heads come before closed'''
445 the branch, open heads come before closed'''
446 bt = {}
446 bt = {}
447 for bn, heads in self.branchmap().iteritems():
447 for bn, heads in self.branchmap().iteritems():
448 tip = heads[-1]
448 tip = heads[-1]
449 for h in reversed(heads):
449 for h in reversed(heads):
450 if 'close' not in self.changelog.read(h)[5]:
450 if 'close' not in self.changelog.read(h)[5]:
451 tip = h
451 tip = h
452 break
452 break
453 bt[bn] = tip
453 bt[bn] = tip
454 return bt
454 return bt
455
455
456 def _readbranchcache(self):
456 def _readbranchcache(self):
457 partial = {}
457 partial = {}
458 try:
458 try:
459 f = self.opener("cache/branchheads")
459 f = self.opener("cache/branchheads")
460 lines = f.read().split('\n')
460 lines = f.read().split('\n')
461 f.close()
461 f.close()
462 except (IOError, OSError):
462 except (IOError, OSError):
463 return {}, nullid, nullrev
463 return {}, nullid, nullrev
464
464
465 try:
465 try:
466 last, lrev = lines.pop(0).split(" ", 1)
466 last, lrev = lines.pop(0).split(" ", 1)
467 last, lrev = bin(last), int(lrev)
467 last, lrev = bin(last), int(lrev)
468 if lrev >= len(self) or self[lrev].node() != last:
468 if lrev >= len(self) or self[lrev].node() != last:
469 # invalidate the cache
469 # invalidate the cache
470 raise ValueError('invalidating branch cache (tip differs)')
470 raise ValueError('invalidating branch cache (tip differs)')
471 for l in lines:
471 for l in lines:
472 if not l:
472 if not l:
473 continue
473 continue
474 node, label = l.split(" ", 1)
474 node, label = l.split(" ", 1)
475 label = encoding.tolocal(label.strip())
475 label = encoding.tolocal(label.strip())
476 partial.setdefault(label, []).append(bin(node))
476 partial.setdefault(label, []).append(bin(node))
477 except KeyboardInterrupt:
477 except KeyboardInterrupt:
478 raise
478 raise
479 except Exception, inst:
479 except Exception, inst:
480 if self.ui.debugflag:
480 if self.ui.debugflag:
481 self.ui.warn(str(inst), '\n')
481 self.ui.warn(str(inst), '\n')
482 partial, last, lrev = {}, nullid, nullrev
482 partial, last, lrev = {}, nullid, nullrev
483 return partial, last, lrev
483 return partial, last, lrev
484
484
485 def _writebranchcache(self, branches, tip, tiprev):
485 def _writebranchcache(self, branches, tip, tiprev):
486 try:
486 try:
487 f = self.opener("cache/branchheads", "w", atomictemp=True)
487 f = self.opener("cache/branchheads", "w", atomictemp=True)
488 f.write("%s %s\n" % (hex(tip), tiprev))
488 f.write("%s %s\n" % (hex(tip), tiprev))
489 for label, nodes in branches.iteritems():
489 for label, nodes in branches.iteritems():
490 for node in nodes:
490 for node in nodes:
491 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
491 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
492 f.rename()
492 f.rename()
493 except (IOError, OSError):
493 except (IOError, OSError):
494 pass
494 pass
495
495
496 def _updatebranchcache(self, partial, ctxgen):
496 def _updatebranchcache(self, partial, ctxgen):
497 # collect new branch entries
497 # collect new branch entries
498 newbranches = {}
498 newbranches = {}
499 for c in ctxgen:
499 for c in ctxgen:
500 newbranches.setdefault(c.branch(), []).append(c.node())
500 newbranches.setdefault(c.branch(), []).append(c.node())
501 # if older branchheads are reachable from new ones, they aren't
501 # if older branchheads are reachable from new ones, they aren't
502 # really branchheads. Note checking parents is insufficient:
502 # really branchheads. Note checking parents is insufficient:
503 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
503 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
504 for branch, newnodes in newbranches.iteritems():
504 for branch, newnodes in newbranches.iteritems():
505 bheads = partial.setdefault(branch, [])
505 bheads = partial.setdefault(branch, [])
506 bheads.extend(newnodes)
506 bheads.extend(newnodes)
507 if len(bheads) <= 1:
507 if len(bheads) <= 1:
508 continue
508 continue
509 # starting from tip means fewer passes over reachable
509 # starting from tip means fewer passes over reachable
510 while newnodes:
510 while newnodes:
511 latest = newnodes.pop()
511 latest = newnodes.pop()
512 if latest not in bheads:
512 if latest not in bheads:
513 continue
513 continue
514 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
514 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
515 reachable = self.changelog.reachable(latest, minbhrev)
515 reachable = self.changelog.reachable(latest, minbhrev)
516 reachable.remove(latest)
516 reachable.remove(latest)
517 bheads = [b for b in bheads if b not in reachable]
517 bheads = [b for b in bheads if b not in reachable]
518 partial[branch] = bheads
518 partial[branch] = bheads
519
519
520 def lookup(self, key):
520 def lookup(self, key):
521 if isinstance(key, int):
521 if isinstance(key, int):
522 return self.changelog.node(key)
522 return self.changelog.node(key)
523 elif key == '.':
523 elif key == '.':
524 return self.dirstate.parents()[0]
524 return self.dirstate.parents()[0]
525 elif key == 'null':
525 elif key == 'null':
526 return nullid
526 return nullid
527 elif key == 'tip':
527 elif key == 'tip':
528 return self.changelog.tip()
528 return self.changelog.tip()
529 n = self.changelog._match(key)
529 n = self.changelog._match(key)
530 if n:
530 if n:
531 return n
531 return n
532 if key in self._bookmarks:
532 if key in self._bookmarks:
533 return self._bookmarks[key]
533 return self._bookmarks[key]
534 if key in self.tags():
534 if key in self.tags():
535 return self.tags()[key]
535 return self.tags()[key]
536 if key in self.branchtags():
536 if key in self.branchtags():
537 return self.branchtags()[key]
537 return self.branchtags()[key]
538 n = self.changelog._partialmatch(key)
538 n = self.changelog._partialmatch(key)
539 if n:
539 if n:
540 return n
540 return n
541
541
542 # can't find key, check if it might have come from damaged dirstate
542 # can't find key, check if it might have come from damaged dirstate
543 if key in self.dirstate.parents():
543 if key in self.dirstate.parents():
544 raise error.Abort(_("working directory has unknown parent '%s'!")
544 raise error.Abort(_("working directory has unknown parent '%s'!")
545 % short(key))
545 % short(key))
546 try:
546 try:
547 if len(key) == 20:
547 if len(key) == 20:
548 key = hex(key)
548 key = hex(key)
549 except:
549 except:
550 pass
550 pass
551 raise error.RepoLookupError(_("unknown revision '%s'") % key)
551 raise error.RepoLookupError(_("unknown revision '%s'") % key)
552
552
553 def lookupbranch(self, key, remote=None):
553 def lookupbranch(self, key, remote=None):
554 repo = remote or self
554 repo = remote or self
555 if key in repo.branchmap():
555 if key in repo.branchmap():
556 return key
556 return key
557
557
558 repo = (remote and remote.local()) and remote or self
558 repo = (remote and remote.local()) and remote or self
559 return repo[key].branch()
559 return repo[key].branch()
560
560
561 def local(self):
561 def local(self):
562 return True
562 return True
563
563
564 def join(self, f):
564 def join(self, f):
565 return os.path.join(self.path, f)
565 return os.path.join(self.path, f)
566
566
567 def wjoin(self, f):
567 def wjoin(self, f):
568 return os.path.join(self.root, f)
568 return os.path.join(self.root, f)
569
569
570 def file(self, f):
570 def file(self, f):
571 if f[0] == '/':
571 if f[0] == '/':
572 f = f[1:]
572 f = f[1:]
573 return filelog.filelog(self.sopener, f)
573 return filelog.filelog(self.sopener, f)
574
574
575 def changectx(self, changeid):
575 def changectx(self, changeid):
576 return self[changeid]
576 return self[changeid]
577
577
578 def parents(self, changeid=None):
578 def parents(self, changeid=None):
579 '''get list of changectxs for parents of changeid'''
579 '''get list of changectxs for parents of changeid'''
580 return self[changeid].parents()
580 return self[changeid].parents()
581
581
582 def filectx(self, path, changeid=None, fileid=None):
582 def filectx(self, path, changeid=None, fileid=None):
583 """changeid can be a changeset revision, node, or tag.
583 """changeid can be a changeset revision, node, or tag.
584 fileid can be a file revision or node."""
584 fileid can be a file revision or node."""
585 return context.filectx(self, path, changeid, fileid)
585 return context.filectx(self, path, changeid, fileid)
586
586
587 def getcwd(self):
587 def getcwd(self):
588 return self.dirstate.getcwd()
588 return self.dirstate.getcwd()
589
589
590 def pathto(self, f, cwd=None):
590 def pathto(self, f, cwd=None):
591 return self.dirstate.pathto(f, cwd)
591 return self.dirstate.pathto(f, cwd)
592
592
593 def wfile(self, f, mode='r'):
593 def wfile(self, f, mode='r'):
594 return self.wopener(f, mode)
594 return self.wopener(f, mode)
595
595
596 def _link(self, f):
596 def _link(self, f):
597 return os.path.islink(self.wjoin(f))
597 return os.path.islink(self.wjoin(f))
598
598
599 def _loadfilter(self, filter):
599 def _loadfilter(self, filter):
600 if filter not in self.filterpats:
600 if filter not in self.filterpats:
601 l = []
601 l = []
602 for pat, cmd in self.ui.configitems(filter):
602 for pat, cmd in self.ui.configitems(filter):
603 if cmd == '!':
603 if cmd == '!':
604 continue
604 continue
605 mf = matchmod.match(self.root, '', [pat])
605 mf = matchmod.match(self.root, '', [pat])
606 fn = None
606 fn = None
607 params = cmd
607 params = cmd
608 for name, filterfn in self._datafilters.iteritems():
608 for name, filterfn in self._datafilters.iteritems():
609 if cmd.startswith(name):
609 if cmd.startswith(name):
610 fn = filterfn
610 fn = filterfn
611 params = cmd[len(name):].lstrip()
611 params = cmd[len(name):].lstrip()
612 break
612 break
613 if not fn:
613 if not fn:
614 fn = lambda s, c, **kwargs: util.filter(s, c)
614 fn = lambda s, c, **kwargs: util.filter(s, c)
615 # Wrap old filters not supporting keyword arguments
615 # Wrap old filters not supporting keyword arguments
616 if not inspect.getargspec(fn)[2]:
616 if not inspect.getargspec(fn)[2]:
617 oldfn = fn
617 oldfn = fn
618 fn = lambda s, c, **kwargs: oldfn(s, c)
618 fn = lambda s, c, **kwargs: oldfn(s, c)
619 l.append((mf, fn, params))
619 l.append((mf, fn, params))
620 self.filterpats[filter] = l
620 self.filterpats[filter] = l
621 return self.filterpats[filter]
621 return self.filterpats[filter]
622
622
623 def _filter(self, filterpats, filename, data):
623 def _filter(self, filterpats, filename, data):
624 for mf, fn, cmd in filterpats:
624 for mf, fn, cmd in filterpats:
625 if mf(filename):
625 if mf(filename):
626 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
626 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
627 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
627 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
628 break
628 break
629
629
630 return data
630 return data
631
631
632 @propertycache
632 @propertycache
633 def _encodefilterpats(self):
633 def _encodefilterpats(self):
634 return self._loadfilter('encode')
634 return self._loadfilter('encode')
635
635
636 @propertycache
636 @propertycache
637 def _decodefilterpats(self):
637 def _decodefilterpats(self):
638 return self._loadfilter('decode')
638 return self._loadfilter('decode')
639
639
640 def adddatafilter(self, name, filter):
640 def adddatafilter(self, name, filter):
641 self._datafilters[name] = filter
641 self._datafilters[name] = filter
642
642
643 def wread(self, filename):
643 def wread(self, filename):
644 if self._link(filename):
644 if self._link(filename):
645 data = os.readlink(self.wjoin(filename))
645 data = os.readlink(self.wjoin(filename))
646 else:
646 else:
647 data = self.wopener(filename, 'r').read()
647 data = self.wopener(filename, 'r').read()
648 return self._filter(self._encodefilterpats, filename, data)
648 return self._filter(self._encodefilterpats, filename, data)
649
649
650 def wwrite(self, filename, data, flags):
650 def wwrite(self, filename, data, flags):
651 data = self._filter(self._decodefilterpats, filename, data)
651 data = self._filter(self._decodefilterpats, filename, data)
652 if 'l' in flags:
652 if 'l' in flags:
653 self.wopener.symlink(data, filename)
653 self.wopener.symlink(data, filename)
654 else:
654 else:
655 self.wopener(filename, 'w').write(data)
655 self.wopener(filename, 'w').write(data)
656 if 'x' in flags:
656 if 'x' in flags:
657 util.set_flags(self.wjoin(filename), False, True)
657 util.set_flags(self.wjoin(filename), False, True)
658
658
659 def wwritedata(self, filename, data):
659 def wwritedata(self, filename, data):
660 return self._filter(self._decodefilterpats, filename, data)
660 return self._filter(self._decodefilterpats, filename, data)
661
661
662 def transaction(self, desc):
662 def transaction(self, desc):
663 tr = self._transref and self._transref() or None
663 tr = self._transref and self._transref() or None
664 if tr and tr.running():
664 if tr and tr.running():
665 return tr.nest()
665 return tr.nest()
666
666
667 # abort here if the journal already exists
667 # abort here if the journal already exists
668 if os.path.exists(self.sjoin("journal")):
668 if os.path.exists(self.sjoin("journal")):
669 raise error.RepoError(
669 raise error.RepoError(
670 _("abandoned transaction found - run hg recover"))
670 _("abandoned transaction found - run hg recover"))
671
671
672 journalfiles = self._writejournal(desc)
672 journalfiles = self._writejournal(desc)
673 renames = [(x, undoname(x)) for x in journalfiles]
673 renames = [(x, undoname(x)) for x in journalfiles]
674
674
675 tr = transaction.transaction(self.ui.warn, self.sopener,
675 tr = transaction.transaction(self.ui.warn, self.sopener,
676 self.sjoin("journal"),
676 self.sjoin("journal"),
677 aftertrans(renames),
677 aftertrans(renames),
678 self.store.createmode)
678 self.store.createmode)
679 self._transref = weakref.ref(tr)
679 self._transref = weakref.ref(tr)
680 return tr
680 return tr
681
681
682 def _writejournal(self, desc):
682 def _writejournal(self, desc):
683 # save dirstate for rollback
683 # save dirstate for rollback
684 try:
684 try:
685 ds = self.opener("dirstate").read()
685 ds = self.opener("dirstate").read()
686 except IOError:
686 except IOError:
687 ds = ""
687 ds = ""
688 self.opener("journal.dirstate", "w").write(ds)
688 self.opener("journal.dirstate", "w").write(ds)
689 self.opener("journal.branch", "w").write(
689 self.opener("journal.branch", "w").write(
690 encoding.fromlocal(self.dirstate.branch()))
690 encoding.fromlocal(self.dirstate.branch()))
691 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
691 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
692
692
693 bkname = self.join('bookmarks')
693 bkname = self.join('bookmarks')
694 if os.path.exists(bkname):
694 if os.path.exists(bkname):
695 util.copyfile(bkname, self.join('journal.bookmarks'))
695 util.copyfile(bkname, self.join('journal.bookmarks'))
696 else:
696 else:
697 self.opener('journal.bookmarks', 'w').write('')
697 self.opener('journal.bookmarks', 'w').write('')
698
698
699 return (self.sjoin('journal'), self.join('journal.dirstate'),
699 return (self.sjoin('journal'), self.join('journal.dirstate'),
700 self.join('journal.branch'), self.join('journal.desc'),
700 self.join('journal.branch'), self.join('journal.desc'),
701 self.join('journal.bookmarks'))
701 self.join('journal.bookmarks'))
702
702
703 def recover(self):
703 def recover(self):
704 lock = self.lock()
704 lock = self.lock()
705 try:
705 try:
706 if os.path.exists(self.sjoin("journal")):
706 if os.path.exists(self.sjoin("journal")):
707 self.ui.status(_("rolling back interrupted transaction\n"))
707 self.ui.status(_("rolling back interrupted transaction\n"))
708 transaction.rollback(self.sopener, self.sjoin("journal"),
708 transaction.rollback(self.sopener, self.sjoin("journal"),
709 self.ui.warn)
709 self.ui.warn)
710 self.invalidate()
710 self.invalidate()
711 return True
711 return True
712 else:
712 else:
713 self.ui.warn(_("no interrupted transaction available\n"))
713 self.ui.warn(_("no interrupted transaction available\n"))
714 return False
714 return False
715 finally:
715 finally:
716 lock.release()
716 lock.release()
717
717
718 def rollback(self, dryrun=False):
718 def rollback(self, dryrun=False):
719 wlock = lock = None
719 wlock = lock = None
720 try:
720 try:
721 wlock = self.wlock()
721 wlock = self.wlock()
722 lock = self.lock()
722 lock = self.lock()
723 if os.path.exists(self.sjoin("undo")):
723 if os.path.exists(self.sjoin("undo")):
724 try:
724 try:
725 args = self.opener("undo.desc", "r").read().splitlines()
725 args = self.opener("undo.desc", "r").read().splitlines()
726 if len(args) >= 3 and self.ui.verbose:
726 if len(args) >= 3 and self.ui.verbose:
727 desc = _("repository tip rolled back to revision %s"
727 desc = _("repository tip rolled back to revision %s"
728 " (undo %s: %s)\n") % (
728 " (undo %s: %s)\n") % (
729 int(args[0]) - 1, args[1], args[2])
729 int(args[0]) - 1, args[1], args[2])
730 elif len(args) >= 2:
730 elif len(args) >= 2:
731 desc = _("repository tip rolled back to revision %s"
731 desc = _("repository tip rolled back to revision %s"
732 " (undo %s)\n") % (
732 " (undo %s)\n") % (
733 int(args[0]) - 1, args[1])
733 int(args[0]) - 1, args[1])
734 except IOError:
734 except IOError:
735 desc = _("rolling back unknown transaction\n")
735 desc = _("rolling back unknown transaction\n")
736 self.ui.status(desc)
736 self.ui.status(desc)
737 if dryrun:
737 if dryrun:
738 return
738 return
739 transaction.rollback(self.sopener, self.sjoin("undo"),
739 transaction.rollback(self.sopener, self.sjoin("undo"),
740 self.ui.warn)
740 self.ui.warn)
741 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
741 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
742 if os.path.exists(self.join('undo.bookmarks')):
742 if os.path.exists(self.join('undo.bookmarks')):
743 util.rename(self.join('undo.bookmarks'),
743 util.rename(self.join('undo.bookmarks'),
744 self.join('bookmarks'))
744 self.join('bookmarks'))
745 try:
745 try:
746 branch = self.opener("undo.branch").read()
746 branch = self.opener("undo.branch").read()
747 self.dirstate.setbranch(branch)
747 self.dirstate.setbranch(branch)
748 except IOError:
748 except IOError:
749 self.ui.warn(_("named branch could not be reset, "
749 self.ui.warn(_("named branch could not be reset, "
750 "current branch is still: %s\n")
750 "current branch is still: %s\n")
751 % self.dirstate.branch())
751 % self.dirstate.branch())
752 self.invalidate()
752 self.invalidate()
753 self.dirstate.invalidate()
753 self.dirstate.invalidate()
754 self.destroyed()
754 self.destroyed()
755 parents = tuple([p.rev() for p in self.parents()])
755 parents = tuple([p.rev() for p in self.parents()])
756 if len(parents) > 1:
756 if len(parents) > 1:
757 self.ui.status(_("working directory now based on "
757 self.ui.status(_("working directory now based on "
758 "revisions %d and %d\n") % parents)
758 "revisions %d and %d\n") % parents)
759 else:
759 else:
760 self.ui.status(_("working directory now based on "
760 self.ui.status(_("working directory now based on "
761 "revision %d\n") % parents)
761 "revision %d\n") % parents)
762 else:
762 else:
763 self.ui.warn(_("no rollback information available\n"))
763 self.ui.warn(_("no rollback information available\n"))
764 return 1
764 return 1
765 finally:
765 finally:
766 release(lock, wlock)
766 release(lock, wlock)
767
767
768 def invalidatecaches(self):
768 def invalidatecaches(self):
769 self._tags = None
769 self._tags = None
770 self._tagtypes = None
770 self._tagtypes = None
771 self.nodetagscache = None
771 self.nodetagscache = None
772 self._branchcache = None # in UTF-8
772 self._branchcache = None # in UTF-8
773 self._branchcachetip = None
773 self._branchcachetip = None
774
774
775 def invalidate(self):
775 def invalidate(self):
776 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
776 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
777 if a in self.__dict__:
777 if a in self.__dict__:
778 delattr(self, a)
778 delattr(self, a)
779 self.invalidatecaches()
779 self.invalidatecaches()
780
780
781 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
781 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
782 try:
782 try:
783 l = lock.lock(lockname, 0, releasefn, desc=desc)
783 l = lock.lock(lockname, 0, releasefn, desc=desc)
784 except error.LockHeld, inst:
784 except error.LockHeld, inst:
785 if not wait:
785 if not wait:
786 raise
786 raise
787 self.ui.warn(_("waiting for lock on %s held by %r\n") %
787 self.ui.warn(_("waiting for lock on %s held by %r\n") %
788 (desc, inst.locker))
788 (desc, inst.locker))
789 # default to 600 seconds timeout
789 # default to 600 seconds timeout
790 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
790 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
791 releasefn, desc=desc)
791 releasefn, desc=desc)
792 if acquirefn:
792 if acquirefn:
793 acquirefn()
793 acquirefn()
794 return l
794 return l
795
795
796 def lock(self, wait=True):
796 def lock(self, wait=True):
797 '''Lock the repository store (.hg/store) and return a weak reference
797 '''Lock the repository store (.hg/store) and return a weak reference
798 to the lock. Use this before modifying the store (e.g. committing or
798 to the lock. Use this before modifying the store (e.g. committing or
799 stripping). If you are opening a transaction, get a lock as well.)'''
799 stripping). If you are opening a transaction, get a lock as well.)'''
800 l = self._lockref and self._lockref()
800 l = self._lockref and self._lockref()
801 if l is not None and l.held:
801 if l is not None and l.held:
802 l.lock()
802 l.lock()
803 return l
803 return l
804
804
805 l = self._lock(self.sjoin("lock"), wait, self.store.write,
805 l = self._lock(self.sjoin("lock"), wait, self.store.write,
806 self.invalidate, _('repository %s') % self.origroot)
806 self.invalidate, _('repository %s') % self.origroot)
807 self._lockref = weakref.ref(l)
807 self._lockref = weakref.ref(l)
808 return l
808 return l
809
809
810 def wlock(self, wait=True):
810 def wlock(self, wait=True):
811 '''Lock the non-store parts of the repository (everything under
811 '''Lock the non-store parts of the repository (everything under
812 .hg except .hg/store) and return a weak reference to the lock.
812 .hg except .hg/store) and return a weak reference to the lock.
813 Use this before modifying files in .hg.'''
813 Use this before modifying files in .hg.'''
814 l = self._wlockref and self._wlockref()
814 l = self._wlockref and self._wlockref()
815 if l is not None and l.held:
815 if l is not None and l.held:
816 l.lock()
816 l.lock()
817 return l
817 return l
818
818
819 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
819 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
820 self.dirstate.invalidate, _('working directory of %s') %
820 self.dirstate.invalidate, _('working directory of %s') %
821 self.origroot)
821 self.origroot)
822 self._wlockref = weakref.ref(l)
822 self._wlockref = weakref.ref(l)
823 return l
823 return l
824
824
825 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
825 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
826 """
826 """
827 commit an individual file as part of a larger transaction
827 commit an individual file as part of a larger transaction
828 """
828 """
829
829
830 fname = fctx.path()
830 fname = fctx.path()
831 text = fctx.data()
831 text = fctx.data()
832 flog = self.file(fname)
832 flog = self.file(fname)
833 fparent1 = manifest1.get(fname, nullid)
833 fparent1 = manifest1.get(fname, nullid)
834 fparent2 = fparent2o = manifest2.get(fname, nullid)
834 fparent2 = fparent2o = manifest2.get(fname, nullid)
835
835
836 meta = {}
836 meta = {}
837 copy = fctx.renamed()
837 copy = fctx.renamed()
838 if copy and copy[0] != fname:
838 if copy and copy[0] != fname:
839 # Mark the new revision of this file as a copy of another
839 # Mark the new revision of this file as a copy of another
840 # file. This copy data will effectively act as a parent
840 # file. This copy data will effectively act as a parent
841 # of this new revision. If this is a merge, the first
841 # of this new revision. If this is a merge, the first
842 # parent will be the nullid (meaning "look up the copy data")
842 # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
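
    # Illustrative note (added; not in the upstream source): for a file
    # committed after a rename, the block above stores the copy source in
    # the filelog metadata, e.g. meta = {"copy": "foo", "copyrev": <hex>},
    # and resets fparent1 to nullid so readers recover the true ancestry
    # from that metadata rather than from the literal filelog parents.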

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
-            parents = (p1, p2)
-            if p2 == nullid:
-                parents = (p1,)
-            bookmarks.update(self, parents, ret)
+            bookmarks.update(self, p1, ret)
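            # (The change above passes only the first parent to
            # bookmarks.update, so a bookmark sitting on the merged,
            # second-parent head is no longer forwarded onto the merge
            # changeset.)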
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
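
    # Usage sketch (hypothetical pattern, added for illustration): walking
    # the tracked Python files in the working directory:
    #   m = matchmod.match(repo.root, '', ['glob:**.py'])
    #   for f in repo.walk(m):
    #       print f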

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
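
    # Example (sketch, added): the seven lists come back in the order built
    # above:
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, ignored=True, clean=True)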

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
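
    # Note (added): the loop above records nodes at exponentially growing
    # distances below each top (1, 2, 4, 8, ... steps down the first-parent
    # chain), so each pair yields a small sample even on long histories.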

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and fetch == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        changed = False
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], self._bookmarks[k]
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl.rev() >= cr.rev():
                        continue
                    if cr in cl.descendants():
                        self._bookmarks[k] = cr.node()
                        changed = True
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        self.ui.warn(_("not updating divergent"
                                       " bookmark %s\n") % k)
        if changed:
            bookmarks.write(self)
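        # (Note: the loop above only ever fast-forwards a local bookmark --
        # the remote location must strictly descend from the local one;
        # anything else is reported as divergent and left alone.)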

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
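        # Interpretation sketch (added): after ret = repo.push(other), a
        # value greater than 1 would indicate the remote gained heads,
        # following the addchangegroup() encoding documented further below.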
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if mnfst.deltaparent(r) in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                # revlog.group yields three entries per node, so
                # dividing by 3 gives an approximation of how many
                # nodes have been processed.
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('changesets'))
            changecount = cnt / 3
            self.ui.progress(_('bundling'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            efiles = {}
            for cnt, chnk in enumerate(group):
                if cnt % 3 == 1:
                    mnode = chnk[:20]
                    efiles.update(mnfst.readdelta(mnode))
                yield chnk
                # see above comment for why we divide by 3
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('manifests'), total=changecount)
            self.ui.progress(_('bundling'), None)
            efiles = len(efiles)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            for idx, fname in enumerate(sorted(changedfiles)):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        # even though we print the same progress on
                        # most loop iterations, put the progress call
                        # here so that time estimates (if any) can be updated
                        self.ui.progress(
                            _('bundling'), idx, item=fname,
                            unit=_('files'), total=efiles)
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""
1695
1692
1696 self.hook('preoutgoing', throw=True, source=source)
1693 self.hook('preoutgoing', throw=True, source=source)
1697
1694
1698 cl = self.changelog
1695 cl = self.changelog
1699 revset = set([cl.rev(n) for n in nodes])
1696 revset = set([cl.rev(n) for n in nodes])
1700 self.changegroupinfo(nodes, source)
1697 self.changegroupinfo(nodes, source)
1701
1698
1702 def identity(x):
1699 def identity(x):
1703 return x
1700 return x
1704
1701
1705 def gennodelst(log):
1702 def gennodelst(log):
1706 for r in log:
1703 for r in log:
1707 if log.linkrev(r) in revset:
1704 if log.linkrev(r) in revset:
1708 yield log.node(r)
1705 yield log.node(r)
1709
1706
1710 def lookuplinkrev_func(revlog):
1707 def lookuplinkrev_func(revlog):
1711 def lookuplinkrev(n):
1708 def lookuplinkrev(n):
1712 return cl.node(revlog.linkrev(revlog.rev(n)))
1709 return cl.node(revlog.linkrev(revlog.rev(n)))
1713 return lookuplinkrev
1710 return lookuplinkrev
1714
1711
1715 def gengroup():
1712 def gengroup():
1716 '''yield a sequence of changegroup chunks (strings)'''
1713 '''yield a sequence of changegroup chunks (strings)'''
1717 # construct a list of all changed files
1714 # construct a list of all changed files
1718 changedfiles = set()
1715 changedfiles = set()
1719 mmfs = {}
1716 mmfs = {}
1720 collect = changegroup.collector(cl, mmfs, changedfiles)
1717 collect = changegroup.collector(cl, mmfs, changedfiles)
1721
1718
1722 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1719 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1723 # revlog.group yields three entries per node, so
1720 # revlog.group yields three entries per node, so
1724 # dividing by 3 gives an approximation of how many
1721 # dividing by 3 gives an approximation of how many
1725 # nodes have been processed.
1722 # nodes have been processed.
1726 self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
1723 self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
1727 yield chnk
1724 yield chnk
1728 changecount = cnt / 3
1725 changecount = cnt / 3
1729 self.ui.progress(_('bundling'), None)
1726 self.ui.progress(_('bundling'), None)
1730
1727
1731 mnfst = self.manifest
1728 mnfst = self.manifest
1732 nodeiter = gennodelst(mnfst)
1729 nodeiter = gennodelst(mnfst)
1733 efiles = {}
1730 efiles = {}
1734 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1731 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1735 lookuplinkrev_func(mnfst))):
1732 lookuplinkrev_func(mnfst))):
1736 if cnt % 3 == 1:
1733 if cnt % 3 == 1:
1737 mnode = chnk[:20]
1734 mnode = chnk[:20]
1738 efiles.update(mnfst.readdelta(mnode))
1735 efiles.update(mnfst.readdelta(mnode))
1739 # see above comment for why we divide by 3
1736 # see above comment for why we divide by 3
1740 self.ui.progress(_('bundling'), cnt / 3,
1737 self.ui.progress(_('bundling'), cnt / 3,
1741 unit=_('manifests'), total=changecount)
1738 unit=_('manifests'), total=changecount)
1742 yield chnk
1739 yield chnk
1743 efiles = len(efiles)
1740 efiles = len(efiles)
1744 self.ui.progress(_('bundling'), None)
1741 self.ui.progress(_('bundling'), None)
1745
1742
1746 for idx, fname in enumerate(sorted(changedfiles)):
1743 for idx, fname in enumerate(sorted(changedfiles)):
1747 filerevlog = self.file(fname)
1744 filerevlog = self.file(fname)
1748 if not len(filerevlog):
1745 if not len(filerevlog):
1749 raise util.Abort(_("empty or missing revlog for %s") % fname)
1746 raise util.Abort(_("empty or missing revlog for %s") % fname)
1750 nodeiter = gennodelst(filerevlog)
1747 nodeiter = gennodelst(filerevlog)
1751 nodeiter = list(nodeiter)
1748 nodeiter = list(nodeiter)
1752 if nodeiter:
1749 if nodeiter:
1753 yield changegroup.chunkheader(len(fname))
1750 yield changegroup.chunkheader(len(fname))
1754 yield fname
1751 yield fname
1755 lookup = lookuplinkrev_func(filerevlog)
1752 lookup = lookuplinkrev_func(filerevlog)
1756 for chnk in filerevlog.group(nodeiter, lookup):
1753 for chnk in filerevlog.group(nodeiter, lookup):
1757 self.ui.progress(
1754 self.ui.progress(
1758 _('bundling'), idx, item=fname,
1755 _('bundling'), idx, item=fname,
1759 total=efiles, unit=_('files'))
1756 total=efiles, unit=_('files'))
1760 yield chnk
1757 yield chnk
1761 self.ui.progress(_('bundling'), None)
1758 self.ui.progress(_('bundling'), None)
1762
1759
1763 yield changegroup.closechunk()
1760 yield changegroup.closechunk()
1764
1761
1765 if nodes:
1762 if nodes:
1766 self.hook('outgoing', node=hex(nodes[0]), source=source)
1763 self.hook('outgoing', node=hex(nodes[0]), source=source)
1767
1764
1768 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1765 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1769
1766
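    # Illustrative sketch (not part of this module): a caller could persist
    # the stream returned above as a bundle file, along the lines of
    #   cg = repo.changegroup(nodes, 'bundle')
    #   changegroup.writebundle(cg, 'out.hg', 'HG10BZ')
    # where 'out.hg' and the 'HG10BZ' compression type are assumed example
    # choices, not values taken from this code.
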
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

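            # Note added for clarity: the unbundler appears to call
            # source.callback once per chunk it reads off the stream, which
            # is what advances the per-chunk progress counter above.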
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

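            # The validation above is opt-in. An illustrative hgrc snippet
            # (standard config syntax, shown here only as an example) to
            # enable it on a server:
            #   [server]
            #   validate = True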
            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while 1:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # FIXME - why does this care about tip?
        if newheads == oldheads:
            bookmarks.update(self, self.dirstate.parents(), self['tip'].node())

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

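    # Sketch of a hypothetical caller interpreting the return value (the
    # message shown is illustrative, not necessarily what hg prints):
    #   ret = repo.addchangegroup(cg, 'pull', remote.url())
    #   if ret > 1:
    #       ui.status(_("(new heads added - consider merging)\n"))
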
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
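            # After the status code, the stream consumed below carries a
            # "<total_files> <total_bytes>" line, then one entry per file:
            # a "<name>\0<size>" header followed by <size> bytes of raw
            # store data.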
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

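    # Usage note: streaming is only attempted when no explicit heads are
    # given; requesting particular revs (as 'hg clone -r' does) falls back
    # to a regular pull, as the checks above show.
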
    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
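# For example (illustrative path): undoname('.hg/store/journal') yields
# '.hg/store/undo' - the name under which a completed transaction's
# journal is kept for later rollback.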

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True