fncachestore: defer updating the fncache file to a single file open...
Adrian Buehlmann
r13391:d00bbff8 default
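
The only change to localrepo.py in this changeset is the two-line edit in lock() near the end of the diff: instead of passing None as the store lock's releasefn, the repository now passes self.store.write, giving the store a single hook to flush pending fncache entries when the lock is released. The sketch below illustrates the deferred-write pattern this enables; the buffering fncache class is a simplified assumption for illustration, not the actual mercurial/store.py code from this changeset.

# A minimal sketch of the idea, assuming a simplified fncache:
# additions are buffered in memory and flushed in one open/write
# when the store lock's releasefn (store.write) runs.

class fncache(object):
    def __init__(self, opener):
        self.opener = opener  # assumed: an opener rooted at .hg/store
        self.entries = set()
        self._dirty = False

    def add(self, fn):
        # record a new store file; no file I/O happens here
        self.entries.add(fn)
        self._dirty = True

    def write(self):
        # one open of the fncache file per lock release, instead of
        # one write per added file
        if self._dirty:
            fp = self.opener('fncache', 'wb')
            fp.write('\n'.join(sorted(self.entries)) + '\n')
            fp.close()
            self._dirty = False

Wiring self.store.write in as the releasefn means the flush runs exactly once, when the store lock is released, which is what the commit title means by deferring the fncache update to a single file open.
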
@@ -1,2016 +1,2016 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared',
25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 'dotencode'))
26 'dotencode'))
27
27
28 def __init__(self, baseui, path=None, create=0):
28 def __init__(self, baseui, path=None, create=0):
29 repo.repository.__init__(self)
29 repo.repository.__init__(self)
30 self.root = os.path.realpath(util.expandpath(path))
30 self.root = os.path.realpath(util.expandpath(path))
31 self.path = os.path.join(self.root, ".hg")
31 self.path = os.path.join(self.root, ".hg")
32 self.origroot = path
32 self.origroot = path
33 self.auditor = util.path_auditor(self.root, self._checknested)
33 self.auditor = util.path_auditor(self.root, self._checknested)
34 self.opener = util.opener(self.path)
34 self.opener = util.opener(self.path)
35 self.wopener = util.opener(self.root)
35 self.wopener = util.opener(self.root)
36 self.baseui = baseui
36 self.baseui = baseui
37 self.ui = baseui.copy()
37 self.ui = baseui.copy()
38
38
39 try:
39 try:
40 self.ui.readconfig(self.join("hgrc"), self.root)
40 self.ui.readconfig(self.join("hgrc"), self.root)
41 extensions.loadall(self.ui)
41 extensions.loadall(self.ui)
42 except IOError:
42 except IOError:
43 pass
43 pass
44
44
45 if not os.path.isdir(self.path):
45 if not os.path.isdir(self.path):
46 if create:
46 if create:
47 if not os.path.exists(path):
47 if not os.path.exists(path):
48 util.makedirs(path)
48 util.makedirs(path)
49 os.mkdir(self.path)
49 os.mkdir(self.path)
50 requirements = ["revlogv1"]
50 requirements = ["revlogv1"]
51 if self.ui.configbool('format', 'usestore', True):
51 if self.ui.configbool('format', 'usestore', True):
52 os.mkdir(os.path.join(self.path, "store"))
52 os.mkdir(os.path.join(self.path, "store"))
53 requirements.append("store")
53 requirements.append("store")
54 if self.ui.configbool('format', 'usefncache', True):
54 if self.ui.configbool('format', 'usefncache', True):
55 requirements.append("fncache")
55 requirements.append("fncache")
56 if self.ui.configbool('format', 'dotencode', True):
56 if self.ui.configbool('format', 'dotencode', True):
57 requirements.append('dotencode')
57 requirements.append('dotencode')
58 # create an invalid changelog
58 # create an invalid changelog
59 self.opener("00changelog.i", "a").write(
59 self.opener("00changelog.i", "a").write(
60 '\0\0\0\2' # represents revlogv2
60 '\0\0\0\2' # represents revlogv2
61 ' dummy changelog to prevent using the old repo layout'
61 ' dummy changelog to prevent using the old repo layout'
62 )
62 )
63 if self.ui.configbool('format', 'parentdelta', False):
63 if self.ui.configbool('format', 'parentdelta', False):
64 requirements.append("parentdelta")
64 requirements.append("parentdelta")
65 else:
65 else:
66 raise error.RepoError(_("repository %s not found") % path)
66 raise error.RepoError(_("repository %s not found") % path)
67 elif create:
67 elif create:
68 raise error.RepoError(_("repository %s already exists") % path)
68 raise error.RepoError(_("repository %s already exists") % path)
69 else:
69 else:
70 # find requirements
70 # find requirements
71 requirements = set()
71 requirements = set()
72 try:
72 try:
73 requirements = set(self.opener("requires").read().splitlines())
73 requirements = set(self.opener("requires").read().splitlines())
74 except IOError, inst:
74 except IOError, inst:
75 if inst.errno != errno.ENOENT:
75 if inst.errno != errno.ENOENT:
76 raise
76 raise
77 for r in requirements - self.supported:
77 for r in requirements - self.supported:
78 raise error.RepoError(_("requirement '%s' not supported") % r)
78 raise error.RepoError(_("requirement '%s' not supported") % r)
79
79
80 self.sharedpath = self.path
80 self.sharedpath = self.path
81 try:
81 try:
82 s = os.path.realpath(self.opener("sharedpath").read())
82 s = os.path.realpath(self.opener("sharedpath").read())
83 if not os.path.exists(s):
83 if not os.path.exists(s):
84 raise error.RepoError(
84 raise error.RepoError(
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 self.sharedpath = s
86 self.sharedpath = s
87 except IOError, inst:
87 except IOError, inst:
88 if inst.errno != errno.ENOENT:
88 if inst.errno != errno.ENOENT:
89 raise
89 raise
90
90
91 self.store = store.store(requirements, self.sharedpath, util.opener)
91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 self.spath = self.store.path
92 self.spath = self.store.path
93 self.sopener = self.store.opener
93 self.sopener = self.store.opener
94 self.sjoin = self.store.join
94 self.sjoin = self.store.join
95 self.opener.createmode = self.store.createmode
95 self.opener.createmode = self.store.createmode
96 self._applyrequirements(requirements)
96 self._applyrequirements(requirements)
97 if create:
97 if create:
98 self._writerequirements()
98 self._writerequirements()
99
99
100 # These two define the set of tags for this repository. _tags
100 # These two define the set of tags for this repository. _tags
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 # 'local'. (Global tags are defined by .hgtags across all
102 # 'local'. (Global tags are defined by .hgtags across all
103 # heads, and local tags are defined in .hg/localtags.) They
103 # heads, and local tags are defined in .hg/localtags.) They
104 # constitute the in-memory cache of tags.
104 # constitute the in-memory cache of tags.
105 self._tags = None
105 self._tags = None
106 self._tagtypes = None
106 self._tagtypes = None
107
107
108 self._branchcache = None
108 self._branchcache = None
109 self._branchcachetip = None
109 self._branchcachetip = None
110 self.nodetagscache = None
110 self.nodetagscache = None
111 self.filterpats = {}
111 self.filterpats = {}
112 self._datafilters = {}
112 self._datafilters = {}
113 self._transref = self._lockref = self._wlockref = None
113 self._transref = self._lockref = self._wlockref = None
114
114
115 def _applyrequirements(self, requirements):
115 def _applyrequirements(self, requirements):
116 self.requirements = requirements
116 self.requirements = requirements
117 self.sopener.options = {}
117 self.sopener.options = {}
118 if 'parentdelta' in requirements:
118 if 'parentdelta' in requirements:
119 self.sopener.options['parentdelta'] = 1
119 self.sopener.options['parentdelta'] = 1
120
120
121 def _writerequirements(self):
121 def _writerequirements(self):
122 reqfile = self.opener("requires", "w")
122 reqfile = self.opener("requires", "w")
123 for r in self.requirements:
123 for r in self.requirements:
124 reqfile.write("%s\n" % r)
124 reqfile.write("%s\n" % r)
125 reqfile.close()
125 reqfile.close()
126
126
127 def _checknested(self, path):
127 def _checknested(self, path):
128 """Determine if path is a legal nested repository."""
128 """Determine if path is a legal nested repository."""
129 if not path.startswith(self.root):
129 if not path.startswith(self.root):
130 return False
130 return False
131 subpath = path[len(self.root) + 1:]
131 subpath = path[len(self.root) + 1:]
132
132
133 # XXX: Checking against the current working copy is wrong in
133 # XXX: Checking against the current working copy is wrong in
134 # the sense that it can reject things like
134 # the sense that it can reject things like
135 #
135 #
136 # $ hg cat -r 10 sub/x.txt
136 # $ hg cat -r 10 sub/x.txt
137 #
137 #
138 # if sub/ is no longer a subrepository in the working copy
138 # if sub/ is no longer a subrepository in the working copy
139 # parent revision.
139 # parent revision.
140 #
140 #
141 # However, it can of course also allow things that would have
141 # However, it can of course also allow things that would have
142 # been rejected before, such as the above cat command if sub/
142 # been rejected before, such as the above cat command if sub/
143 # is a subrepository now, but was a normal directory before.
143 # is a subrepository now, but was a normal directory before.
144 # The old path auditor would have rejected by mistake since it
144 # The old path auditor would have rejected by mistake since it
145 # panics when it sees sub/.hg/.
145 # panics when it sees sub/.hg/.
146 #
146 #
147 # All in all, checking against the working copy seems sensible
147 # All in all, checking against the working copy seems sensible
148 # since we want to prevent access to nested repositories on
148 # since we want to prevent access to nested repositories on
149 # the filesystem *now*.
149 # the filesystem *now*.
150 ctx = self[None]
150 ctx = self[None]
151 parts = util.splitpath(subpath)
151 parts = util.splitpath(subpath)
152 while parts:
152 while parts:
153 prefix = os.sep.join(parts)
153 prefix = os.sep.join(parts)
154 if prefix in ctx.substate:
154 if prefix in ctx.substate:
155 if prefix == subpath:
155 if prefix == subpath:
156 return True
156 return True
157 else:
157 else:
158 sub = ctx.sub(prefix)
158 sub = ctx.sub(prefix)
159 return sub.checknested(subpath[len(prefix) + 1:])
159 return sub.checknested(subpath[len(prefix) + 1:])
160 else:
160 else:
161 parts.pop()
161 parts.pop()
162 return False
162 return False
163
163
164 @util.propertycache
164 @util.propertycache
165 def _bookmarks(self):
165 def _bookmarks(self):
166 return bookmarks.read(self)
166 return bookmarks.read(self)
167
167
168 @util.propertycache
168 @util.propertycache
169 def _bookmarkcurrent(self):
169 def _bookmarkcurrent(self):
170 return bookmarks.readcurrent(self)
170 return bookmarks.readcurrent(self)
171
171
172 @propertycache
172 @propertycache
173 def changelog(self):
173 def changelog(self):
174 c = changelog.changelog(self.sopener)
174 c = changelog.changelog(self.sopener)
175 if 'HG_PENDING' in os.environ:
175 if 'HG_PENDING' in os.environ:
176 p = os.environ['HG_PENDING']
176 p = os.environ['HG_PENDING']
177 if p.startswith(self.root):
177 if p.startswith(self.root):
178 c.readpending('00changelog.i.a')
178 c.readpending('00changelog.i.a')
179 self.sopener.options['defversion'] = c.version
179 self.sopener.options['defversion'] = c.version
180 return c
180 return c
181
181
182 @propertycache
182 @propertycache
183 def manifest(self):
183 def manifest(self):
184 return manifest.manifest(self.sopener)
184 return manifest.manifest(self.sopener)
185
185
186 @propertycache
186 @propertycache
187 def dirstate(self):
187 def dirstate(self):
188 warned = [0]
188 warned = [0]
189 def validate(node):
189 def validate(node):
190 try:
190 try:
191 r = self.changelog.rev(node)
191 r = self.changelog.rev(node)
192 return node
192 return node
193 except error.LookupError:
193 except error.LookupError:
194 if not warned[0]:
194 if not warned[0]:
195 warned[0] = True
195 warned[0] = True
196 self.ui.warn(_("warning: ignoring unknown"
196 self.ui.warn(_("warning: ignoring unknown"
197 " working parent %s!\n") % short(node))
197 " working parent %s!\n") % short(node))
198 return nullid
198 return nullid
199
199
200 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
200 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
201
201
202 def __getitem__(self, changeid):
202 def __getitem__(self, changeid):
203 if changeid is None:
203 if changeid is None:
204 return context.workingctx(self)
204 return context.workingctx(self)
205 return context.changectx(self, changeid)
205 return context.changectx(self, changeid)
206
206
207 def __contains__(self, changeid):
207 def __contains__(self, changeid):
208 try:
208 try:
209 return bool(self.lookup(changeid))
209 return bool(self.lookup(changeid))
210 except error.RepoLookupError:
210 except error.RepoLookupError:
211 return False
211 return False
212
212
213 def __nonzero__(self):
213 def __nonzero__(self):
214 return True
214 return True
215
215
216 def __len__(self):
216 def __len__(self):
217 return len(self.changelog)
217 return len(self.changelog)
218
218
219 def __iter__(self):
219 def __iter__(self):
220 for i in xrange(len(self)):
220 for i in xrange(len(self)):
221 yield i
221 yield i
222
222
223 def url(self):
223 def url(self):
224 return 'file:' + self.root
224 return 'file:' + self.root
225
225
226 def hook(self, name, throw=False, **args):
226 def hook(self, name, throw=False, **args):
227 return hook.hook(self.ui, self, name, throw, **args)
227 return hook.hook(self.ui, self, name, throw, **args)
228
228
229 tag_disallowed = ':\r\n'
229 tag_disallowed = ':\r\n'
230
230
231 def _tag(self, names, node, message, local, user, date, extra={}):
231 def _tag(self, names, node, message, local, user, date, extra={}):
232 if isinstance(names, str):
232 if isinstance(names, str):
233 allchars = names
233 allchars = names
234 names = (names,)
234 names = (names,)
235 else:
235 else:
236 allchars = ''.join(names)
236 allchars = ''.join(names)
237 for c in self.tag_disallowed:
237 for c in self.tag_disallowed:
238 if c in allchars:
238 if c in allchars:
239 raise util.Abort(_('%r cannot be used in a tag name') % c)
239 raise util.Abort(_('%r cannot be used in a tag name') % c)
240
240
241 branches = self.branchmap()
241 branches = self.branchmap()
242 for name in names:
242 for name in names:
243 self.hook('pretag', throw=True, node=hex(node), tag=name,
243 self.hook('pretag', throw=True, node=hex(node), tag=name,
244 local=local)
244 local=local)
245 if name in branches:
245 if name in branches:
246 self.ui.warn(_("warning: tag %s conflicts with existing"
246 self.ui.warn(_("warning: tag %s conflicts with existing"
247 " branch name\n") % name)
247 " branch name\n") % name)
248
248
249 def writetags(fp, names, munge, prevtags):
249 def writetags(fp, names, munge, prevtags):
250 fp.seek(0, 2)
250 fp.seek(0, 2)
251 if prevtags and prevtags[-1] != '\n':
251 if prevtags and prevtags[-1] != '\n':
252 fp.write('\n')
252 fp.write('\n')
253 for name in names:
253 for name in names:
254 m = munge and munge(name) or name
254 m = munge and munge(name) or name
255 if self._tagtypes and name in self._tagtypes:
255 if self._tagtypes and name in self._tagtypes:
256 old = self._tags.get(name, nullid)
256 old = self._tags.get(name, nullid)
257 fp.write('%s %s\n' % (hex(old), m))
257 fp.write('%s %s\n' % (hex(old), m))
258 fp.write('%s %s\n' % (hex(node), m))
258 fp.write('%s %s\n' % (hex(node), m))
259 fp.close()
259 fp.close()
260
260
261 prevtags = ''
261 prevtags = ''
262 if local:
262 if local:
263 try:
263 try:
264 fp = self.opener('localtags', 'r+')
264 fp = self.opener('localtags', 'r+')
265 except IOError:
265 except IOError:
266 fp = self.opener('localtags', 'a')
266 fp = self.opener('localtags', 'a')
267 else:
267 else:
268 prevtags = fp.read()
268 prevtags = fp.read()
269
269
270 # local tags are stored in the current charset
270 # local tags are stored in the current charset
271 writetags(fp, names, None, prevtags)
271 writetags(fp, names, None, prevtags)
272 for name in names:
272 for name in names:
273 self.hook('tag', node=hex(node), tag=name, local=local)
273 self.hook('tag', node=hex(node), tag=name, local=local)
274 return
274 return
275
275
276 try:
276 try:
277 fp = self.wfile('.hgtags', 'rb+')
277 fp = self.wfile('.hgtags', 'rb+')
278 except IOError:
278 except IOError:
279 fp = self.wfile('.hgtags', 'ab')
279 fp = self.wfile('.hgtags', 'ab')
280 else:
280 else:
281 prevtags = fp.read()
281 prevtags = fp.read()
282
282
283 # committed tags are stored in UTF-8
283 # committed tags are stored in UTF-8
284 writetags(fp, names, encoding.fromlocal, prevtags)
284 writetags(fp, names, encoding.fromlocal, prevtags)
285
285
286 if '.hgtags' not in self.dirstate:
286 if '.hgtags' not in self.dirstate:
287 self[None].add(['.hgtags'])
287 self[None].add(['.hgtags'])
288
288
289 m = matchmod.exact(self.root, '', ['.hgtags'])
289 m = matchmod.exact(self.root, '', ['.hgtags'])
290 tagnode = self.commit(message, user, date, extra=extra, match=m)
290 tagnode = self.commit(message, user, date, extra=extra, match=m)
291
291
292 for name in names:
292 for name in names:
293 self.hook('tag', node=hex(node), tag=name, local=local)
293 self.hook('tag', node=hex(node), tag=name, local=local)
294
294
295 return tagnode
295 return tagnode
296
296
297 def tag(self, names, node, message, local, user, date):
297 def tag(self, names, node, message, local, user, date):
298 '''tag a revision with one or more symbolic names.
298 '''tag a revision with one or more symbolic names.
299
299
300 names is a list of strings or, when adding a single tag, names may be a
300 names is a list of strings or, when adding a single tag, names may be a
301 string.
301 string.
302
302
303 if local is True, the tags are stored in a per-repository file.
303 if local is True, the tags are stored in a per-repository file.
304 otherwise, they are stored in the .hgtags file, and a new
304 otherwise, they are stored in the .hgtags file, and a new
305 changeset is committed with the change.
305 changeset is committed with the change.
306
306
307 keyword arguments:
307 keyword arguments:
308
308
309 local: whether to store tags in non-version-controlled file
309 local: whether to store tags in non-version-controlled file
310 (default False)
310 (default False)
311
311
312 message: commit message to use if committing
312 message: commit message to use if committing
313
313
314 user: name of user to use if committing
314 user: name of user to use if committing
315
315
316 date: date tuple to use if committing'''
316 date: date tuple to use if committing'''
317
317
318 if not local:
318 if not local:
319 for x in self.status()[:5]:
319 for x in self.status()[:5]:
320 if '.hgtags' in x:
320 if '.hgtags' in x:
321 raise util.Abort(_('working copy of .hgtags is changed '
321 raise util.Abort(_('working copy of .hgtags is changed '
322 '(please commit .hgtags manually)'))
322 '(please commit .hgtags manually)'))
323
323
324 self.tags() # instantiate the cache
324 self.tags() # instantiate the cache
325 self._tag(names, node, message, local, user, date)
325 self._tag(names, node, message, local, user, date)
326
326
327 def tags(self):
327 def tags(self):
328 '''return a mapping of tag to node'''
328 '''return a mapping of tag to node'''
329 if self._tags is None:
329 if self._tags is None:
330 (self._tags, self._tagtypes) = self._findtags()
330 (self._tags, self._tagtypes) = self._findtags()
331
331
332 return self._tags
332 return self._tags
333
333
334 def _findtags(self):
334 def _findtags(self):
335 '''Do the hard work of finding tags. Return a pair of dicts
335 '''Do the hard work of finding tags. Return a pair of dicts
336 (tags, tagtypes) where tags maps tag name to node, and tagtypes
336 (tags, tagtypes) where tags maps tag name to node, and tagtypes
337 maps tag name to a string like \'global\' or \'local\'.
337 maps tag name to a string like \'global\' or \'local\'.
338 Subclasses or extensions are free to add their own tags, but
338 Subclasses or extensions are free to add their own tags, but
339 should be aware that the returned dicts will be retained for the
339 should be aware that the returned dicts will be retained for the
340 duration of the localrepo object.'''
340 duration of the localrepo object.'''
341
341
342 # XXX what tagtype should subclasses/extensions use? Currently
342 # XXX what tagtype should subclasses/extensions use? Currently
343 # mq and bookmarks add tags, but do not set the tagtype at all.
343 # mq and bookmarks add tags, but do not set the tagtype at all.
344 # Should each extension invent its own tag type? Should there
344 # Should each extension invent its own tag type? Should there
345 # be one tagtype for all such "virtual" tags? Or is the status
345 # be one tagtype for all such "virtual" tags? Or is the status
346 # quo fine?
346 # quo fine?
347
347
348 alltags = {} # map tag name to (node, hist)
348 alltags = {} # map tag name to (node, hist)
349 tagtypes = {}
349 tagtypes = {}
350
350
351 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
351 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
352 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
352 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
353
353
354 # Build the return dicts. Have to re-encode tag names because
354 # Build the return dicts. Have to re-encode tag names because
355 # the tags module always uses UTF-8 (in order not to lose info
355 # the tags module always uses UTF-8 (in order not to lose info
356 # writing to the cache), but the rest of Mercurial wants them in
356 # writing to the cache), but the rest of Mercurial wants them in
357 # local encoding.
357 # local encoding.
358 tags = {}
358 tags = {}
359 for (name, (node, hist)) in alltags.iteritems():
359 for (name, (node, hist)) in alltags.iteritems():
360 if node != nullid:
360 if node != nullid:
361 tags[encoding.tolocal(name)] = node
361 tags[encoding.tolocal(name)] = node
362 tags['tip'] = self.changelog.tip()
362 tags['tip'] = self.changelog.tip()
363 tagtypes = dict([(encoding.tolocal(name), value)
363 tagtypes = dict([(encoding.tolocal(name), value)
364 for (name, value) in tagtypes.iteritems()])
364 for (name, value) in tagtypes.iteritems()])
365 return (tags, tagtypes)
365 return (tags, tagtypes)
366
366
367 def tagtype(self, tagname):
367 def tagtype(self, tagname):
368 '''
368 '''
369 return the type of the given tag. result can be:
369 return the type of the given tag. result can be:
370
370
371 'local' : a local tag
371 'local' : a local tag
372 'global' : a global tag
372 'global' : a global tag
373 None : tag does not exist
373 None : tag does not exist
374 '''
374 '''
375
375
376 self.tags()
376 self.tags()
377
377
378 return self._tagtypes.get(tagname)
378 return self._tagtypes.get(tagname)
379
379
380 def tagslist(self):
380 def tagslist(self):
381 '''return a list of tags ordered by revision'''
381 '''return a list of tags ordered by revision'''
382 l = []
382 l = []
383 for t, n in self.tags().iteritems():
383 for t, n in self.tags().iteritems():
384 try:
384 try:
385 r = self.changelog.rev(n)
385 r = self.changelog.rev(n)
386 except:
386 except:
387 r = -2 # sort to the beginning of the list if unknown
387 r = -2 # sort to the beginning of the list if unknown
388 l.append((r, t, n))
388 l.append((r, t, n))
389 return [(t, n) for r, t, n in sorted(l)]
389 return [(t, n) for r, t, n in sorted(l)]
390
390
391 def nodetags(self, node):
391 def nodetags(self, node):
392 '''return the tags associated with a node'''
392 '''return the tags associated with a node'''
393 if not self.nodetagscache:
393 if not self.nodetagscache:
394 self.nodetagscache = {}
394 self.nodetagscache = {}
395 for t, n in self.tags().iteritems():
395 for t, n in self.tags().iteritems():
396 self.nodetagscache.setdefault(n, []).append(t)
396 self.nodetagscache.setdefault(n, []).append(t)
397 for tags in self.nodetagscache.itervalues():
397 for tags in self.nodetagscache.itervalues():
398 tags.sort()
398 tags.sort()
399 return self.nodetagscache.get(node, [])
399 return self.nodetagscache.get(node, [])
400
400
401 def nodebookmarks(self, node):
401 def nodebookmarks(self, node):
402 marks = []
402 marks = []
403 for bookmark, n in self._bookmarks.iteritems():
403 for bookmark, n in self._bookmarks.iteritems():
404 if n == node:
404 if n == node:
405 marks.append(bookmark)
405 marks.append(bookmark)
406 return sorted(marks)
406 return sorted(marks)
407
407
408 def _branchtags(self, partial, lrev):
408 def _branchtags(self, partial, lrev):
409 # TODO: rename this function?
409 # TODO: rename this function?
410 tiprev = len(self) - 1
410 tiprev = len(self) - 1
411 if lrev != tiprev:
411 if lrev != tiprev:
412 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
412 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
413 self._updatebranchcache(partial, ctxgen)
413 self._updatebranchcache(partial, ctxgen)
414 self._writebranchcache(partial, self.changelog.tip(), tiprev)
414 self._writebranchcache(partial, self.changelog.tip(), tiprev)
415
415
416 return partial
416 return partial
417
417
418 def updatebranchcache(self):
418 def updatebranchcache(self):
419 tip = self.changelog.tip()
419 tip = self.changelog.tip()
420 if self._branchcache is not None and self._branchcachetip == tip:
420 if self._branchcache is not None and self._branchcachetip == tip:
421 return self._branchcache
421 return self._branchcache
422
422
423 oldtip = self._branchcachetip
423 oldtip = self._branchcachetip
424 self._branchcachetip = tip
424 self._branchcachetip = tip
425 if oldtip is None or oldtip not in self.changelog.nodemap:
425 if oldtip is None or oldtip not in self.changelog.nodemap:
426 partial, last, lrev = self._readbranchcache()
426 partial, last, lrev = self._readbranchcache()
427 else:
427 else:
428 lrev = self.changelog.rev(oldtip)
428 lrev = self.changelog.rev(oldtip)
429 partial = self._branchcache
429 partial = self._branchcache
430
430
431 self._branchtags(partial, lrev)
431 self._branchtags(partial, lrev)
432 # this private cache holds all heads (not just tips)
432 # this private cache holds all heads (not just tips)
433 self._branchcache = partial
433 self._branchcache = partial
434
434
435 def branchmap(self):
435 def branchmap(self):
436 '''returns a dictionary {branch: [branchheads]}'''
436 '''returns a dictionary {branch: [branchheads]}'''
437 self.updatebranchcache()
437 self.updatebranchcache()
438 return self._branchcache
438 return self._branchcache
439
439
440 def branchtags(self):
440 def branchtags(self):
441 '''return a dict where branch names map to the tipmost head of
441 '''return a dict where branch names map to the tipmost head of
442 the branch, open heads come before closed'''
442 the branch, open heads come before closed'''
443 bt = {}
443 bt = {}
444 for bn, heads in self.branchmap().iteritems():
444 for bn, heads in self.branchmap().iteritems():
445 tip = heads[-1]
445 tip = heads[-1]
446 for h in reversed(heads):
446 for h in reversed(heads):
447 if 'close' not in self.changelog.read(h)[5]:
447 if 'close' not in self.changelog.read(h)[5]:
448 tip = h
448 tip = h
449 break
449 break
450 bt[bn] = tip
450 bt[bn] = tip
451 return bt
451 return bt
452
452
453 def _readbranchcache(self):
453 def _readbranchcache(self):
454 partial = {}
454 partial = {}
455 try:
455 try:
456 f = self.opener("cache/branchheads")
456 f = self.opener("cache/branchheads")
457 lines = f.read().split('\n')
457 lines = f.read().split('\n')
458 f.close()
458 f.close()
459 except (IOError, OSError):
459 except (IOError, OSError):
460 return {}, nullid, nullrev
460 return {}, nullid, nullrev
461
461
462 try:
462 try:
463 last, lrev = lines.pop(0).split(" ", 1)
463 last, lrev = lines.pop(0).split(" ", 1)
464 last, lrev = bin(last), int(lrev)
464 last, lrev = bin(last), int(lrev)
465 if lrev >= len(self) or self[lrev].node() != last:
465 if lrev >= len(self) or self[lrev].node() != last:
466 # invalidate the cache
466 # invalidate the cache
467 raise ValueError('invalidating branch cache (tip differs)')
467 raise ValueError('invalidating branch cache (tip differs)')
468 for l in lines:
468 for l in lines:
469 if not l:
469 if not l:
470 continue
470 continue
471 node, label = l.split(" ", 1)
471 node, label = l.split(" ", 1)
472 label = encoding.tolocal(label.strip())
472 label = encoding.tolocal(label.strip())
473 partial.setdefault(label, []).append(bin(node))
473 partial.setdefault(label, []).append(bin(node))
474 except KeyboardInterrupt:
474 except KeyboardInterrupt:
475 raise
475 raise
476 except Exception, inst:
476 except Exception, inst:
477 if self.ui.debugflag:
477 if self.ui.debugflag:
478 self.ui.warn(str(inst), '\n')
478 self.ui.warn(str(inst), '\n')
479 partial, last, lrev = {}, nullid, nullrev
479 partial, last, lrev = {}, nullid, nullrev
480 return partial, last, lrev
480 return partial, last, lrev
481
481
482 def _writebranchcache(self, branches, tip, tiprev):
482 def _writebranchcache(self, branches, tip, tiprev):
483 try:
483 try:
484 f = self.opener("cache/branchheads", "w", atomictemp=True)
484 f = self.opener("cache/branchheads", "w", atomictemp=True)
485 f.write("%s %s\n" % (hex(tip), tiprev))
485 f.write("%s %s\n" % (hex(tip), tiprev))
486 for label, nodes in branches.iteritems():
486 for label, nodes in branches.iteritems():
487 for node in nodes:
487 for node in nodes:
488 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
488 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
489 f.rename()
489 f.rename()
490 except (IOError, OSError):
490 except (IOError, OSError):
491 pass
491 pass
492
492
493 def _updatebranchcache(self, partial, ctxgen):
493 def _updatebranchcache(self, partial, ctxgen):
494 # collect new branch entries
494 # collect new branch entries
495 newbranches = {}
495 newbranches = {}
496 for c in ctxgen:
496 for c in ctxgen:
497 newbranches.setdefault(c.branch(), []).append(c.node())
497 newbranches.setdefault(c.branch(), []).append(c.node())
498 # if older branchheads are reachable from new ones, they aren't
498 # if older branchheads are reachable from new ones, they aren't
499 # really branchheads. Note checking parents is insufficient:
499 # really branchheads. Note checking parents is insufficient:
500 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
500 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
501 for branch, newnodes in newbranches.iteritems():
501 for branch, newnodes in newbranches.iteritems():
502 bheads = partial.setdefault(branch, [])
502 bheads = partial.setdefault(branch, [])
503 bheads.extend(newnodes)
503 bheads.extend(newnodes)
504 if len(bheads) <= 1:
504 if len(bheads) <= 1:
505 continue
505 continue
506 # starting from tip means fewer passes over reachable
506 # starting from tip means fewer passes over reachable
507 while newnodes:
507 while newnodes:
508 latest = newnodes.pop()
508 latest = newnodes.pop()
509 if latest not in bheads:
509 if latest not in bheads:
510 continue
510 continue
511 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
511 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
512 reachable = self.changelog.reachable(latest, minbhrev)
512 reachable = self.changelog.reachable(latest, minbhrev)
513 reachable.remove(latest)
513 reachable.remove(latest)
514 bheads = [b for b in bheads if b not in reachable]
514 bheads = [b for b in bheads if b not in reachable]
515 partial[branch] = bheads
515 partial[branch] = bheads
516
516
517 def lookup(self, key):
517 def lookup(self, key):
518 if isinstance(key, int):
518 if isinstance(key, int):
519 return self.changelog.node(key)
519 return self.changelog.node(key)
520 elif key == '.':
520 elif key == '.':
521 return self.dirstate.parents()[0]
521 return self.dirstate.parents()[0]
522 elif key == 'null':
522 elif key == 'null':
523 return nullid
523 return nullid
524 elif key == 'tip':
524 elif key == 'tip':
525 return self.changelog.tip()
525 return self.changelog.tip()
526 n = self.changelog._match(key)
526 n = self.changelog._match(key)
527 if n:
527 if n:
528 return n
528 return n
529 if key in self._bookmarks:
529 if key in self._bookmarks:
530 return self._bookmarks[key]
530 return self._bookmarks[key]
531 if key in self.tags():
531 if key in self.tags():
532 return self.tags()[key]
532 return self.tags()[key]
533 if key in self.branchtags():
533 if key in self.branchtags():
534 return self.branchtags()[key]
534 return self.branchtags()[key]
535 n = self.changelog._partialmatch(key)
535 n = self.changelog._partialmatch(key)
536 if n:
536 if n:
537 return n
537 return n
538
538
539 # can't find key, check if it might have come from damaged dirstate
539 # can't find key, check if it might have come from damaged dirstate
540 if key in self.dirstate.parents():
540 if key in self.dirstate.parents():
541 raise error.Abort(_("working directory has unknown parent '%s'!")
541 raise error.Abort(_("working directory has unknown parent '%s'!")
542 % short(key))
542 % short(key))
543 try:
543 try:
544 if len(key) == 20:
544 if len(key) == 20:
545 key = hex(key)
545 key = hex(key)
546 except:
546 except:
547 pass
547 pass
548 raise error.RepoLookupError(_("unknown revision '%s'") % key)
548 raise error.RepoLookupError(_("unknown revision '%s'") % key)
549
549
550 def lookupbranch(self, key, remote=None):
550 def lookupbranch(self, key, remote=None):
551 repo = remote or self
551 repo = remote or self
552 if key in repo.branchmap():
552 if key in repo.branchmap():
553 return key
553 return key
554
554
555 repo = (remote and remote.local()) and remote or self
555 repo = (remote and remote.local()) and remote or self
556 return repo[key].branch()
556 return repo[key].branch()
557
557
558 def local(self):
558 def local(self):
559 return True
559 return True
560
560
561 def join(self, f):
561 def join(self, f):
562 return os.path.join(self.path, f)
562 return os.path.join(self.path, f)
563
563
564 def wjoin(self, f):
564 def wjoin(self, f):
565 return os.path.join(self.root, f)
565 return os.path.join(self.root, f)
566
566
567 def file(self, f):
567 def file(self, f):
568 if f[0] == '/':
568 if f[0] == '/':
569 f = f[1:]
569 f = f[1:]
570 return filelog.filelog(self.sopener, f)
570 return filelog.filelog(self.sopener, f)
571
571
572 def changectx(self, changeid):
572 def changectx(self, changeid):
573 return self[changeid]
573 return self[changeid]
574
574
575 def parents(self, changeid=None):
575 def parents(self, changeid=None):
576 '''get list of changectxs for parents of changeid'''
576 '''get list of changectxs for parents of changeid'''
577 return self[changeid].parents()
577 return self[changeid].parents()
578
578
579 def filectx(self, path, changeid=None, fileid=None):
579 def filectx(self, path, changeid=None, fileid=None):
580 """changeid can be a changeset revision, node, or tag.
580 """changeid can be a changeset revision, node, or tag.
581 fileid can be a file revision or node."""
581 fileid can be a file revision or node."""
582 return context.filectx(self, path, changeid, fileid)
582 return context.filectx(self, path, changeid, fileid)
583
583
584 def getcwd(self):
584 def getcwd(self):
585 return self.dirstate.getcwd()
585 return self.dirstate.getcwd()
586
586
587 def pathto(self, f, cwd=None):
587 def pathto(self, f, cwd=None):
588 return self.dirstate.pathto(f, cwd)
588 return self.dirstate.pathto(f, cwd)
589
589
590 def wfile(self, f, mode='r'):
590 def wfile(self, f, mode='r'):
591 return self.wopener(f, mode)
591 return self.wopener(f, mode)
592
592
593 def _link(self, f):
593 def _link(self, f):
594 return os.path.islink(self.wjoin(f))
594 return os.path.islink(self.wjoin(f))
595
595
596 def _loadfilter(self, filter):
596 def _loadfilter(self, filter):
597 if filter not in self.filterpats:
597 if filter not in self.filterpats:
598 l = []
598 l = []
599 for pat, cmd in self.ui.configitems(filter):
599 for pat, cmd in self.ui.configitems(filter):
600 if cmd == '!':
600 if cmd == '!':
601 continue
601 continue
602 mf = matchmod.match(self.root, '', [pat])
602 mf = matchmod.match(self.root, '', [pat])
603 fn = None
603 fn = None
604 params = cmd
604 params = cmd
605 for name, filterfn in self._datafilters.iteritems():
605 for name, filterfn in self._datafilters.iteritems():
606 if cmd.startswith(name):
606 if cmd.startswith(name):
607 fn = filterfn
607 fn = filterfn
608 params = cmd[len(name):].lstrip()
608 params = cmd[len(name):].lstrip()
609 break
609 break
610 if not fn:
610 if not fn:
611 fn = lambda s, c, **kwargs: util.filter(s, c)
611 fn = lambda s, c, **kwargs: util.filter(s, c)
612 # Wrap old filters not supporting keyword arguments
612 # Wrap old filters not supporting keyword arguments
613 if not inspect.getargspec(fn)[2]:
613 if not inspect.getargspec(fn)[2]:
614 oldfn = fn
614 oldfn = fn
615 fn = lambda s, c, **kwargs: oldfn(s, c)
615 fn = lambda s, c, **kwargs: oldfn(s, c)
616 l.append((mf, fn, params))
616 l.append((mf, fn, params))
617 self.filterpats[filter] = l
617 self.filterpats[filter] = l
618 return self.filterpats[filter]
618 return self.filterpats[filter]
619
619
620 def _filter(self, filterpats, filename, data):
620 def _filter(self, filterpats, filename, data):
621 for mf, fn, cmd in filterpats:
621 for mf, fn, cmd in filterpats:
622 if mf(filename):
622 if mf(filename):
623 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
623 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
624 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
624 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
625 break
625 break
626
626
627 return data
627 return data
628
628
629 @propertycache
629 @propertycache
630 def _encodefilterpats(self):
630 def _encodefilterpats(self):
631 return self._loadfilter('encode')
631 return self._loadfilter('encode')
632
632
633 @propertycache
633 @propertycache
634 def _decodefilterpats(self):
634 def _decodefilterpats(self):
635 return self._loadfilter('decode')
635 return self._loadfilter('decode')
636
636
637 def adddatafilter(self, name, filter):
637 def adddatafilter(self, name, filter):
638 self._datafilters[name] = filter
638 self._datafilters[name] = filter
639
639
640 def wread(self, filename):
640 def wread(self, filename):
641 if self._link(filename):
641 if self._link(filename):
642 data = os.readlink(self.wjoin(filename))
642 data = os.readlink(self.wjoin(filename))
643 else:
643 else:
644 data = self.wopener(filename, 'r').read()
644 data = self.wopener(filename, 'r').read()
645 return self._filter(self._encodefilterpats, filename, data)
645 return self._filter(self._encodefilterpats, filename, data)
646
646
647 def wwrite(self, filename, data, flags):
647 def wwrite(self, filename, data, flags):
648 data = self._filter(self._decodefilterpats, filename, data)
648 data = self._filter(self._decodefilterpats, filename, data)
649 if 'l' in flags:
649 if 'l' in flags:
650 self.wopener.symlink(data, filename)
650 self.wopener.symlink(data, filename)
651 else:
651 else:
652 self.wopener(filename, 'w').write(data)
652 self.wopener(filename, 'w').write(data)
653 if 'x' in flags:
653 if 'x' in flags:
654 util.set_flags(self.wjoin(filename), False, True)
654 util.set_flags(self.wjoin(filename), False, True)
655
655
656 def wwritedata(self, filename, data):
656 def wwritedata(self, filename, data):
657 return self._filter(self._decodefilterpats, filename, data)
657 return self._filter(self._decodefilterpats, filename, data)
658
658
659 def transaction(self, desc):
659 def transaction(self, desc):
660 tr = self._transref and self._transref() or None
660 tr = self._transref and self._transref() or None
661 if tr and tr.running():
661 if tr and tr.running():
662 return tr.nest()
662 return tr.nest()
663
663
664 # abort here if the journal already exists
664 # abort here if the journal already exists
665 if os.path.exists(self.sjoin("journal")):
665 if os.path.exists(self.sjoin("journal")):
666 raise error.RepoError(
666 raise error.RepoError(
667 _("abandoned transaction found - run hg recover"))
667 _("abandoned transaction found - run hg recover"))
668
668
669 # save dirstate for rollback
669 # save dirstate for rollback
670 try:
670 try:
671 ds = self.opener("dirstate").read()
671 ds = self.opener("dirstate").read()
672 except IOError:
672 except IOError:
673 ds = ""
673 ds = ""
674 self.opener("journal.dirstate", "w").write(ds)
674 self.opener("journal.dirstate", "w").write(ds)
675 self.opener("journal.branch", "w").write(
675 self.opener("journal.branch", "w").write(
676 encoding.fromlocal(self.dirstate.branch()))
676 encoding.fromlocal(self.dirstate.branch()))
677 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
677 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
678
678
679 renames = [(self.sjoin("journal"), self.sjoin("undo")),
679 renames = [(self.sjoin("journal"), self.sjoin("undo")),
680 (self.join("journal.dirstate"), self.join("undo.dirstate")),
680 (self.join("journal.dirstate"), self.join("undo.dirstate")),
681 (self.join("journal.branch"), self.join("undo.branch")),
681 (self.join("journal.branch"), self.join("undo.branch")),
682 (self.join("journal.desc"), self.join("undo.desc"))]
682 (self.join("journal.desc"), self.join("undo.desc"))]
683 tr = transaction.transaction(self.ui.warn, self.sopener,
683 tr = transaction.transaction(self.ui.warn, self.sopener,
684 self.sjoin("journal"),
684 self.sjoin("journal"),
685 aftertrans(renames),
685 aftertrans(renames),
686 self.store.createmode)
686 self.store.createmode)
687 self._transref = weakref.ref(tr)
687 self._transref = weakref.ref(tr)
688 return tr
688 return tr
689
689
690 def recover(self):
690 def recover(self):
691 lock = self.lock()
691 lock = self.lock()
692 try:
692 try:
693 if os.path.exists(self.sjoin("journal")):
693 if os.path.exists(self.sjoin("journal")):
694 self.ui.status(_("rolling back interrupted transaction\n"))
694 self.ui.status(_("rolling back interrupted transaction\n"))
695 transaction.rollback(self.sopener, self.sjoin("journal"),
695 transaction.rollback(self.sopener, self.sjoin("journal"),
696 self.ui.warn)
696 self.ui.warn)
697 self.invalidate()
697 self.invalidate()
698 return True
698 return True
699 else:
699 else:
700 self.ui.warn(_("no interrupted transaction available\n"))
700 self.ui.warn(_("no interrupted transaction available\n"))
701 return False
701 return False
702 finally:
702 finally:
703 lock.release()
703 lock.release()
704
704
705 def rollback(self, dryrun=False):
705 def rollback(self, dryrun=False):
706 wlock = lock = None
706 wlock = lock = None
707 try:
707 try:
708 wlock = self.wlock()
708 wlock = self.wlock()
709 lock = self.lock()
709 lock = self.lock()
710 if os.path.exists(self.sjoin("undo")):
710 if os.path.exists(self.sjoin("undo")):
711 try:
711 try:
712 args = self.opener("undo.desc", "r").read().splitlines()
712 args = self.opener("undo.desc", "r").read().splitlines()
713 if len(args) >= 3 and self.ui.verbose:
713 if len(args) >= 3 and self.ui.verbose:
714 desc = _("rolling back to revision %s"
714 desc = _("rolling back to revision %s"
715 " (undo %s: %s)\n") % (
715 " (undo %s: %s)\n") % (
716 int(args[0]) - 1, args[1], args[2])
716 int(args[0]) - 1, args[1], args[2])
717 elif len(args) >= 2:
717 elif len(args) >= 2:
718 desc = _("rolling back to revision %s (undo %s)\n") % (
718 desc = _("rolling back to revision %s (undo %s)\n") % (
719 int(args[0]) - 1, args[1])
719 int(args[0]) - 1, args[1])
720 except IOError:
720 except IOError:
721 desc = _("rolling back unknown transaction\n")
721 desc = _("rolling back unknown transaction\n")
722 self.ui.status(desc)
722 self.ui.status(desc)
723 if dryrun:
723 if dryrun:
724 return
724 return
725 transaction.rollback(self.sopener, self.sjoin("undo"),
725 transaction.rollback(self.sopener, self.sjoin("undo"),
726 self.ui.warn)
726 self.ui.warn)
727 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
727 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
728 if os.path.exists(self.join('undo.bookmarks')):
728 if os.path.exists(self.join('undo.bookmarks')):
729 util.rename(self.join('undo.bookmarks'),
729 util.rename(self.join('undo.bookmarks'),
730 self.join('bookmarks'))
730 self.join('bookmarks'))
731 try:
731 try:
732 branch = self.opener("undo.branch").read()
732 branch = self.opener("undo.branch").read()
733 self.dirstate.setbranch(branch)
733 self.dirstate.setbranch(branch)
734 except IOError:
734 except IOError:
735 self.ui.warn(_("Named branch could not be reset, "
735 self.ui.warn(_("Named branch could not be reset, "
736 "current branch still is: %s\n")
736 "current branch still is: %s\n")
737 % self.dirstate.branch())
737 % self.dirstate.branch())
738 self.invalidate()
738 self.invalidate()
739 self.dirstate.invalidate()
739 self.dirstate.invalidate()
740 self.destroyed()
740 self.destroyed()
741 else:
741 else:
742 self.ui.warn(_("no rollback information available\n"))
742 self.ui.warn(_("no rollback information available\n"))
743 return 1
743 return 1
744 finally:
744 finally:
745 release(lock, wlock)
745 release(lock, wlock)
746
746
747 def invalidatecaches(self):
747 def invalidatecaches(self):
748 self._tags = None
748 self._tags = None
749 self._tagtypes = None
749 self._tagtypes = None
750 self.nodetagscache = None
750 self.nodetagscache = None
751 self._branchcache = None # in UTF-8
751 self._branchcache = None # in UTF-8
752 self._branchcachetip = None
752 self._branchcachetip = None
753
753
754 def invalidate(self):
754 def invalidate(self):
755 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkscurrent"):
755 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkscurrent"):
756 if a in self.__dict__:
756 if a in self.__dict__:
757 delattr(self, a)
757 delattr(self, a)
758 self.invalidatecaches()
758 self.invalidatecaches()
759
759
760 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
760 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
761 try:
761 try:
762 l = lock.lock(lockname, 0, releasefn, desc=desc)
762 l = lock.lock(lockname, 0, releasefn, desc=desc)
763 except error.LockHeld, inst:
763 except error.LockHeld, inst:
764 if not wait:
764 if not wait:
765 raise
765 raise
766 self.ui.warn(_("waiting for lock on %s held by %r\n") %
766 self.ui.warn(_("waiting for lock on %s held by %r\n") %
767 (desc, inst.locker))
767 (desc, inst.locker))
768 # default to 600 seconds timeout
768 # default to 600 seconds timeout
769 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
769 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
770 releasefn, desc=desc)
770 releasefn, desc=desc)
771 if acquirefn:
771 if acquirefn:
772 acquirefn()
772 acquirefn()
773 return l
773 return l
774
774
775 def lock(self, wait=True):
775 def lock(self, wait=True):
776 '''Lock the repository store (.hg/store) and return a weak reference
776 '''Lock the repository store (.hg/store) and return a weak reference
777 to the lock. Use this before modifying the store (e.g. committing or
777 to the lock. Use this before modifying the store (e.g. committing or
778 stripping). If you are opening a transaction, get a lock as well.)'''
778 stripping). If you are opening a transaction, get a lock as well.)'''
779 l = self._lockref and self._lockref()
779 l = self._lockref and self._lockref()
780 if l is not None and l.held:
780 if l is not None and l.held:
781 l.lock()
781 l.lock()
782 return l
782 return l
783
783
784 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
784 l = self._lock(self.sjoin("lock"), wait, self.store.write,
785 _('repository %s') % self.origroot)
785 self.invalidate, _('repository %s') % self.origroot)
786 self._lockref = weakref.ref(l)
786 self._lockref = weakref.ref(l)
787 return l
787 return l
788
788
789 def wlock(self, wait=True):
789 def wlock(self, wait=True):
790 '''Lock the non-store parts of the repository (everything under
790 '''Lock the non-store parts of the repository (everything under
791 .hg except .hg/store) and return a weak reference to the lock.
791 .hg except .hg/store) and return a weak reference to the lock.
792 Use this before modifying files in .hg.'''
792 Use this before modifying files in .hg.'''
793 l = self._wlockref and self._wlockref()
793 l = self._wlockref and self._wlockref()
794 if l is not None and l.held:
794 if l is not None and l.held:
795 l.lock()
795 l.lock()
796 return l
796 return l
797
797
798 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
798 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
799 self.dirstate.invalidate, _('working directory of %s') %
799 self.dirstate.invalidate, _('working directory of %s') %
800 self.origroot)
800 self.origroot)
801 self._wlockref = weakref.ref(l)
801 self._wlockref = weakref.ref(l)
802 return l
802 return l
803
803
804 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
804 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
805 """
805 """
806 commit an individual file as part of a larger transaction
806 commit an individual file as part of a larger transaction
807 """
807 """
808
808
809 fname = fctx.path()
809 fname = fctx.path()
810 text = fctx.data()
810 text = fctx.data()
811 flog = self.file(fname)
811 flog = self.file(fname)
812 fparent1 = manifest1.get(fname, nullid)
812 fparent1 = manifest1.get(fname, nullid)
813 fparent2 = fparent2o = manifest2.get(fname, nullid)
813 fparent2 = fparent2o = manifest2.get(fname, nullid)
814
814
815 meta = {}
815 meta = {}
816 copy = fctx.renamed()
816 copy = fctx.renamed()
817 if copy and copy[0] != fname:
817 if copy and copy[0] != fname:
818 # Mark the new revision of this file as a copy of another
818 # Mark the new revision of this file as a copy of another
819 # file. This copy data will effectively act as a parent
819 # file. This copy data will effectively act as a parent
820 # of this new revision. If this is a merge, the first
820 # of this new revision. If this is a merge, the first
821 # parent will be the nullid (meaning "look up the copy data")
821 # parent will be the nullid (meaning "look up the copy data")
822 # and the second one will be the other parent. For example:
822 # and the second one will be the other parent. For example:
823 #
823 #
824 # 0 --- 1 --- 3 rev1 changes file foo
824 # 0 --- 1 --- 3 rev1 changes file foo
825 # \ / rev2 renames foo to bar and changes it
825 # \ / rev2 renames foo to bar and changes it
826 # \- 2 -/ rev3 should have bar with all changes and
826 # \- 2 -/ rev3 should have bar with all changes and
827 # should record that bar descends from
827 # should record that bar descends from
828 # bar in rev2 and foo in rev1
828 # bar in rev2 and foo in rev1
829 #
829 #
830 # this allows this merge to succeed:
830 # this allows this merge to succeed:
831 #
831 #
832 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
832 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
833 # \ / merging rev3 and rev4 should use bar@rev2
833 # \ / merging rev3 and rev4 should use bar@rev2
834 # \- 2 --- 4 as the merge base
834 # \- 2 --- 4 as the merge base
835 #
835 #
836
836
837 cfname = copy[0]
837 cfname = copy[0]
838 crev = manifest1.get(cfname)
838 crev = manifest1.get(cfname)
839 newfparent = fparent2
839 newfparent = fparent2
840
840
841 if manifest2: # branch merge
841 if manifest2: # branch merge
842 if fparent2 == nullid or crev is None: # copied on remote side
842 if fparent2 == nullid or crev is None: # copied on remote side
843 if cfname in manifest2:
843 if cfname in manifest2:
844 crev = manifest2[cfname]
844 crev = manifest2[cfname]
845 newfparent = fparent1
845 newfparent = fparent1
846
846
847 # find source in nearest ancestor if we've lost track
847 # find source in nearest ancestor if we've lost track
848 if not crev:
848 if not crev:
849 self.ui.debug(" %s: searching for copy revision for %s\n" %
849 self.ui.debug(" %s: searching for copy revision for %s\n" %
850 (fname, cfname))
850 (fname, cfname))
851 for ancestor in self[None].ancestors():
851 for ancestor in self[None].ancestors():
852 if cfname in ancestor:
852 if cfname in ancestor:
853 crev = ancestor[cfname].filenode()
853 crev = ancestor[cfname].filenode()
854 break
854 break
855
855
856 if crev:
856 if crev:
857 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
857 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
858 meta["copy"] = cfname
858 meta["copy"] = cfname
859 meta["copyrev"] = hex(crev)
859 meta["copyrev"] = hex(crev)
860 fparent1, fparent2 = nullid, newfparent
860 fparent1, fparent2 = nullid, newfparent
861 else:
861 else:
862 self.ui.warn(_("warning: can't find ancestor for '%s' "
862 self.ui.warn(_("warning: can't find ancestor for '%s' "
863 "copied from '%s'!\n") % (fname, cfname))
863 "copied from '%s'!\n") % (fname, cfname))
864
864
865 elif fparent2 != nullid:
865 elif fparent2 != nullid:
866 # is one parent an ancestor of the other?
866 # is one parent an ancestor of the other?
867 fparentancestor = flog.ancestor(fparent1, fparent2)
867 fparentancestor = flog.ancestor(fparent1, fparent2)
868 if fparentancestor == fparent1:
868 if fparentancestor == fparent1:
869 fparent1, fparent2 = fparent2, nullid
869 fparent1, fparent2 = fparent2, nullid
870 elif fparentancestor == fparent2:
870 elif fparentancestor == fparent2:
871 fparent2 = nullid
871 fparent2 = nullid
872
872
873 # is the file changed?
873 # is the file changed?
874 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
874 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
875 changelist.append(fname)
875 changelist.append(fname)
876 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
876 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
877
877
878 # are just the flags changed during merge?
878 # are just the flags changed during merge?
879 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
879 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
880 changelist.append(fname)
880 changelist.append(fname)
881
881
882 return fparent1
882 return fparent1
883
883
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            parents = (p1, p2)
            if p2 == nullid:
                parents = (p1,)
            bookmarks.update(self, parents, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

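    # Illustrative sketch (assumption, mirroring what the higher-level
    # commands.commit() ends up doing):
    #
    #   node = repo.commit(text="fix frobnication",
    #                      user="alice <alice@example.com>")
    #   if node is None:
    #       ui.status("nothing changed\n")
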
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

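    # Illustrative note (assumption): the hook sequence above maps onto hgrc
    # entries such as
    #
    #   [hooks]
    #   pretxncommit.check = python:mychecks.verify  # hypothetical; may abort
    #   commit.notify = hg log -r $HG_NODE           # runs once committed
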
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r

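    # Illustrative sketch: callers unpack the status tuple positionally, e.g.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
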
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

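    # Illustrative sketch (assumption): listing the open heads of a branch,
    # newest first:
    #
    #   for node in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % hex(node))
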
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

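    # Illustrative note (not in the original source): along each top->bottom
    # chain, between() samples nodes at exponentially growing distances from
    # top (1, 2, 4, 8, ...). On a linear chain with top at rev 10 and bottom
    # at rev 0, the sampled list is the nodes of revs [9, 8, 6, 2].
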
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and fetch == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        changed = False
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], self._bookmarks[k]
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl.rev() >= cr.rev():
                        continue
                    if cr in cl.descendants():
                        self._bookmarks[k] = cr.node()
                        changed = True
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        self.ui.warn(_("not updating divergent"
                                       " bookmark %s\n") % k)
        if changed:
            bookmarks.write(self)

        return result

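    # Illustrative sketch (assumption, roughly what commands.pull() does):
    #
    #   other = hg.repository(ui, source)   # needs: from mercurial import hg
    #   r = repo.pull(other, heads=None, force=False)
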
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

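    # Illustrative sketch (assumption): an extension could tighten this via
    # extensions.wrapfunction, e.g.
    #
    #   def mycheckpush(orig, repo, force, revs):
    #       if not force and istainted(repo, revs):  # istainted: hypothetical
    #           raise util.Abort('refusing to push tainted revisions')
    #       return orig(repo, force, revs)
    #   extensions.wrapfunction(localrepository, 'checkpush', mycheckpush)
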
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

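    # Illustrative sketch: interpreting the push return value, e.g.
    #
    #   ret = repo.push(other)
    #   if ret == 0:
    #       ui.warn('push failed or nothing to push\n')
    #   elif ret > 1:
    #       ui.status('%d new remote heads\n' % (ret - 1))
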
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
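        # Illustrative example of the extranodes shape described above:
        #
        #   extranodes = {
        #       1: [(mnode, clnode)],            # extra manifest nodes
        #       'foo.txt': [(fnode, clnode)],    # extra filelog nodes
        #   }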

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming a filenode belongs to the changenode that
            # the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if mnfst.deltaparent(r) in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                # revlog.group yields three entries per node, so
                # dividing by 3 gives an approximation of how many
                # nodes have been processed.
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('changesets'))
            changecount = cnt / 3
            self.ui.progress(_('bundling'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            efiles = {}
            for cnt, chnk in enumerate(group):
                if cnt % 3 == 1:
                    mnode = chnk[:20]
                    efiles.update(mnfst.readdelta(mnode))
                yield chnk
                # see above comment for why we divide by 3
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('manifests'), total=changecount)
            self.ui.progress(_('bundling'), None)
            efiles = len(efiles)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            for idx, fname in enumerate(sorted(changedfiles)):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        # even though we print the same progress on
                        # most loop iterations, put the progress call
                        # here so that time estimates (if any) can be updated
                        self.ui.progress(
                            _('bundling'), idx, item=fname,
                            unit=_('files'), total=efiles)
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                # revlog.group yields three entries per node, so
                # dividing by 3 gives an approximation of how many
                # nodes have been processed.
                self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
                yield chnk
            changecount = cnt / 3
            self.ui.progress(_('bundling'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            efiles = {}
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                if cnt % 3 == 1:
                    mnode = chnk[:20]
                    efiles.update(mnfst.readdelta(mnode))
                # see above comment for why we divide by 3
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('manifests'), total=changecount)
                yield chnk
            efiles = len(efiles)
            self.ui.progress(_('bundling'), None)

            for idx, fname in enumerate(sorted(changedfiles)):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling'), idx, item=fname,
                            total=efiles, unit=_('files'))
                        yield chnk
            self.ui.progress(_('bundling'), None)

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

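    # Illustrative note (assumption about the on-the-wire framing used by the
    # two generators above): each chunk is length-prefixed with a 4-byte
    # big-endian integer that counts the prefix itself, so
    # changegroup.chunkheader(n) is struct.pack(">l", n + 4) and
    # changegroup.closechunk() is the zero-length marker struct.pack(">l", 0).
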
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

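            # note: the validation pass above only runs when the receiving
            # repository opts in, e.g. in its hgrc:
            #
            #   [server]
            #   validate = True
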
            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while 1:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # FIXME - why does this care about tip?
        if newheads == oldheads:
            bookmarks.update(self, self.dirstate.parents(), self['tip'].node())

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


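    # How a caller can interpret the return value, per the docstring above
    # ('repo' and 'cg' are stand-ins):
    #
    #   ret = repo.addchangegroup(cg, 'pull', url)
    #   if ret == 0:    # nothing changed (or no source)
    #       ...
    #   elif ret == 1:  # changesets added, head count unchanged
    #       ...
    #   elif ret > 1:   # ret - 1 new heads appeared
    #       ...
    #   else:           # ret < 0: -ret - 1 heads were removed
    #       ...
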
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

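    # Shape of the stream_out payload parsed above (a sketch; names and
    # sizes are illustrative):
    #
    #   0\n                        status: 0 ok, 1 forbidden, 2 lock failed
    #   2 8192\n                   "<total files> <total bytes>"
    #   data/foo.i\x004096\n       "<name>\0<size>", then <size> raw bytes
    #   00changelog.i\x004096\n    ...and so on, one entry per file
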
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
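
# aftertrans() returns a callback suitable for running once a transaction
# finishes; a sketch with hypothetical rename pairs (the journal/undo names
# are illustrative):
#
#   post = aftertrans([('journal', 'undo'),
#                      ('journal.dirstate', 'undo.dirstate')])
#   post()    # renames each src to dest via util.rename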
@@ -1,339 +1,357
# store.py - repository store handling for Mercurial
#
# Copyright 2008 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import osutil, util
import os, stat

_sha = util.sha1

# This avoids a collision between a file named foo and a dir named
# foo.i or foo.d
def encodedir(path):
    if not path.startswith('data/'):
        return path
    return (path
            .replace(".hg/", ".hg.hg/")
            .replace(".i/", ".i.hg/")
            .replace(".d/", ".d.hg/"))

def decodedir(path):
    if not path.startswith('data/') or ".hg/" not in path:
        return path
    return (path
            .replace(".d.hg/", ".d/")
            .replace(".i.hg/", ".i/")
            .replace(".hg.hg/", ".hg/"))
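
# Round trip of the directory escaping above:
#
#   >>> encodedir('data/foo.i/bar.d/baz')
#   'data/foo.i.hg/bar.d.hg/baz'
#   >>> decodedir('data/foo.i.hg/bar.d.hg/baz')
#   'data/foo.i/bar.d/baz'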

def _buildencodefun():
    e = '_'
    win_reserved = [ord(x) for x in '\\:*?"<>|']
    cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
    for x in (range(32) + range(126, 256) + win_reserved):
        cmap[chr(x)] = "~%02x" % x
    for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
        cmap[chr(x)] = e + chr(x).lower()
    dmap = {}
    for k, v in cmap.iteritems():
        dmap[v] = k
    def decode(s):
        i = 0
        while i < len(s):
            for l in xrange(1, 4):
                try:
                    yield dmap[s[i:i + l]]
                    i += l
                    break
                except KeyError:
                    pass
            else:
                raise KeyError
    return (lambda s: "".join([cmap[c] for c in encodedir(s)]),
            lambda s: decodedir("".join(list(decode(s)))))

encodefilename, decodefilename = _buildencodefun()

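# Uppercase letters and '_' become '_' + lowercase; reserved or illegal
# bytes become '~xx':
#
#   >>> encodefilename('data/FOO:bar.i')
#   'data/_f_o_o~3abar.i'
#   >>> decodefilename('data/_f_o_o~3abar.i')
#   'data/FOO:bar.i'
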
def _build_lower_encodefun():
    win_reserved = [ord(x) for x in '\\:*?"<>|']
    cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
    for x in (range(32) + range(126, 256) + win_reserved):
        cmap[chr(x)] = "~%02x" % x
    for x in range(ord("A"), ord("Z")+1):
        cmap[chr(x)] = chr(x).lower()
    return lambda s: "".join([cmap[c] for c in s])

lowerencode = _build_lower_encodefun()

_windows_reserved_filenames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
def _auxencode(path, dotencode):
    res = []
    for n in path.split('/'):
        if n:
            base = n.split('.')[0]
            if base and (base in _windows_reserved_filenames):
                # encode third letter ('aux' -> 'au~78')
                ec = "~%02x" % ord(n[2])
                n = n[0:2] + ec + n[3:]
            if n[-1] in '. ':
                # encode last period or space ('foo...' -> 'foo..~2e')
                n = n[:-1] + "~%02x" % ord(n[-1])
            if dotencode and n[0] in '. ':
                n = "~%02x" % ord(n[0]) + n[1:]
        res.append(n)
    return '/'.join(res)
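
# Reserved-name masking, trailing '. ' escaping, and (when dotencode is
# on) leading '.' escaping, in one example:
#
#   >>> _auxencode('aux.i/foo./.bar', True)
#   'au~78.i/foo~2e/~2ebar'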

MAX_PATH_LEN_IN_HGSTORE = 120
DIR_PREFIX_LEN = 8
_MAX_SHORTENED_DIRS_LEN = 8 * (DIR_PREFIX_LEN + 1) - 4
def _hybridencode(path, auxencode):
    '''encodes path with a length limit

    Encodes all paths that begin with 'data/', according to the following.

    Default encoding (reversible):

    Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
    characters are encoded as '~xx', where xx is the two digit hex code
    of the character (see encodefilename).
    Relevant path components consisting of Windows reserved filenames are
    masked by encoding the third character ('aux' -> 'au~78', see auxencode).

    Hashed encoding (not reversible):

    If the default-encoded path is longer than MAX_PATH_LEN_IN_HGSTORE, a
    non-reversible hybrid hashing of the path is done instead.
    This encoding uses up to DIR_PREFIX_LEN characters of all directory
    levels of the lowerencoded path, but not more levels than can fit into
    _MAX_SHORTENED_DIRS_LEN.
    Then follows the filler followed by the sha digest of the full path.
    The filler is the beginning of the basename of the lowerencoded path
    (the basename is everything after the last path separator). The filler
    is as long as possible, filling in characters from the basename until
    the encoded path has MAX_PATH_LEN_IN_HGSTORE characters (or all chars
    of the basename have been taken).
    The extension (e.g. '.i' or '.d') is preserved.

    The string 'data/' at the beginning is replaced with 'dh/', if the hashed
    encoding was used.
    '''
    if not path.startswith('data/'):
        return path
    # escape directories ending with .i and .d
    path = encodedir(path)
    ndpath = path[len('data/'):]
    res = 'data/' + auxencode(encodefilename(ndpath))
    if len(res) > MAX_PATH_LEN_IN_HGSTORE:
        digest = _sha(path).hexdigest()
        aep = auxencode(lowerencode(ndpath))
        _root, ext = os.path.splitext(aep)
        parts = aep.split('/')
        basename = parts[-1]
        sdirs = []
        for p in parts[:-1]:
            d = p[:DIR_PREFIX_LEN]
            if d[-1] in '. ':
                # Windows can't access dirs ending in period or space
                d = d[:-1] + '_'
            t = '/'.join(sdirs) + '/' + d
            if len(t) > _MAX_SHORTENED_DIRS_LEN:
                break
            sdirs.append(d)
        dirs = '/'.join(sdirs)
        if len(dirs) > 0:
            dirs += '/'
        res = 'dh/' + dirs + digest + ext
        space_left = MAX_PATH_LEN_IN_HGSTORE - len(res)
        if space_left > 0:
            filler = basename[:space_left]
            res = 'dh/' + dirs + filler + digest + ext
    return res
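
# Short paths keep the reversible form; overlong ones land under 'dh/' as
# '<shortened dirs>/<filler><sha1 hexdigest><ext>'. With auxencode built
# as in store() below:
#
#   >>> _hybridencode('data/FOO.i', auxencode)
#   'data/_f_o_o.i'
#
# Because the 'dh/' form cannot be decoded back, fncachestore keeps the
# original names in the fncache file.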

def _calcmode(path):
    try:
        # files in .hg/ will be created using this mode
        mode = os.stat(path).st_mode
        # avoid some useless chmods
        if (0777 & ~util.umask) == (0777 & mode):
            mode = None
    except OSError:
        mode = None
    return mode

_data = 'data 00manifest.d 00manifest.i 00changelog.d 00changelog.i'

class basicstore(object):
    '''base class for local repository stores'''
    def __init__(self, path, opener, pathjoiner):
        self.pathjoiner = pathjoiner
        self.path = path
        self.createmode = _calcmode(path)
        op = opener(self.path)
        op.createmode = self.createmode
        self.opener = lambda f, *args, **kw: op(encodedir(f), *args, **kw)

    def join(self, f):
        return self.pathjoiner(self.path, encodedir(f))

    def _walk(self, relpath, recurse):
        '''yields (unencoded, encoded, size)'''
        path = self.pathjoiner(self.path, relpath)
        striplen = len(self.path) + len(os.sep)
        l = []
        if os.path.isdir(path):
            visit = [path]
            while visit:
                p = visit.pop()
                for f, kind, st in osutil.listdir(p, stat=True):
                    fp = self.pathjoiner(p, f)
                    if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'):
                        n = util.pconvert(fp[striplen:])
                        l.append((decodedir(n), n, st.st_size))
                    elif kind == stat.S_IFDIR and recurse:
                        visit.append(fp)
        return sorted(l)

    def datafiles(self):
        return self._walk('data', True)

    def walk(self):
        '''yields (unencoded, encoded, size)'''
        # yield data files first
        for x in self.datafiles():
            yield x
        # yield manifest before changelog
        for x in reversed(self._walk('', False)):
            yield x

    def copylist(self):
        return ['requires'] + _data.split()

+    def write(self):
+        pass
+
class encodedstore(basicstore):
    def __init__(self, path, opener, pathjoiner):
        self.pathjoiner = pathjoiner
        self.path = self.pathjoiner(path, 'store')
        self.createmode = _calcmode(self.path)
        op = opener(self.path)
        op.createmode = self.createmode
        self.opener = lambda f, *args, **kw: op(encodefilename(f), *args, **kw)

    def datafiles(self):
        for a, b, size in self._walk('data', True):
            try:
                a = decodefilename(a)
            except KeyError:
                a = None
            yield a, b, size

    def join(self, f):
        return self.pathjoiner(self.path, encodefilename(f))

    def copylist(self):
        return (['requires', '00changelog.i'] +
                [self.pathjoiner('store', f) for f in _data.split()])

class fncache(object):
    # the filename used to be partially encoded
    # hence the encodedir/decodedir dance
    def __init__(self, opener):
        self.opener = opener
        self.entries = None
+        self._dirty = False

    def _load(self):
        '''fill the entries from the fncache file'''
        self.entries = set()
+        self._dirty = False
        try:
            fp = self.opener('fncache', mode='rb')
        except IOError:
            # skip nonexistent file
            return
        for n, line in enumerate(fp):
            if (len(line) < 2) or (line[-1] != '\n'):
                t = _('invalid entry in fncache, line %s') % (n + 1)
                raise util.Abort(t)
            self.entries.add(decodedir(line[:-1]))
        fp.close()

    def rewrite(self, files):
        fp = self.opener('fncache', mode='wb')
        for p in files:
            fp.write(encodedir(p) + '\n')
        fp.close()
        self.entries = set(files)
+        self._dirty = False
+
+    def write(self):
+        if not self._dirty:
+            return
+        fp = self.opener('fncache', mode='wb', atomictemp=True)
+        for p in self.entries:
+            fp.write(encodedir(p) + '\n')
+        fp.rename()
+        self._dirty = False

    def add(self, fn):
        if self.entries is None:
            self._load()
        if fn not in self.entries:
-            self.opener('fncache', 'ab').write(encodedir(fn) + '\n')
+            self._dirty = True
            self.entries.add(fn)

    def __contains__(self, fn):
        if self.entries is None:
            self._load()
        return fn in self.entries

    def __iter__(self):
        if self.entries is None:
            self._load()
        return iter(self.entries)

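# The deferral this changeset introduces, sketched as a call sequence
# (the opener 'op' is a stand-in):
#
#   fnc = fncache(op)
#   fnc.add('data/some/new/file.i')   # marks the cache dirty, no write yet
#   fnc.add('data/another/file.i')    # still no write
#   fnc.write()                       # one atomictemp rewrite of 'fncache'
#
# Previously every add() appended to the file through a fresh open('ab').
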
class fncachestore(basicstore):
    def __init__(self, path, opener, pathjoiner, encode):
        self.encode = encode
        self.pathjoiner = pathjoiner
        self.path = self.pathjoiner(path, 'store')
        self.createmode = _calcmode(self.path)
        op = opener(self.path)
        op.createmode = self.createmode
        fnc = fncache(op)
        self.fncache = fnc

        def fncacheopener(path, mode='r', *args, **kw):
            if mode not in ('r', 'rb') and path.startswith('data/'):
                fnc.add(path)
            return op(self.encode(path), mode, *args, **kw)
        self.opener = fncacheopener

    def join(self, f):
        return self.pathjoiner(self.path, self.encode(f))

    def datafiles(self):
        rewrite = False
        existing = []
        pjoin = self.pathjoiner
        spath = self.path
        for f in self.fncache:
            ef = self.encode(f)
            try:
                st = os.stat(pjoin(spath, ef))
                yield f, ef, st.st_size
                existing.append(f)
            except OSError:
                # nonexistent entry
                rewrite = True
        if rewrite:
            # rewrite fncache to remove nonexistent entries
            # (may be caused by rollback / strip)
            self.fncache.rewrite(existing)

    def copylist(self):
        d = ('data dh fncache'
             ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
        return (['requires', '00changelog.i'] +
                [self.pathjoiner('store', f) for f in d.split()])

+    def write(self):
+        self.fncache.write()
+
def store(requirements, path, opener, pathjoiner=None):
    pathjoiner = pathjoiner or os.path.join
    if 'store' in requirements:
        if 'fncache' in requirements:
            auxencode = lambda f: _auxencode(f, 'dotencode' in requirements)
            encode = lambda f: _hybridencode(f, auxencode)
            return fncachestore(path, opener, pathjoiner, encode)
        return encodedstore(path, opener, pathjoiner)
    return basicstore(path, opener, pathjoiner)
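
# Summary of how store() maps '.hg/requires' entries to a store class:
#
#   (no 'store')          -> basicstore   (plain paths)
#   'store'               -> encodedstore (encodefilename paths)
#   'store' + 'fncache'   -> fncachestore (hybrid paths + fncache file)
#   ... + 'dotencode'     -> fncachestore, also escaping path components
#                            that begin with '.' or ' '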