store: encode first period or space in filenames (issue1713)...
Adrian Buehlmann - r12687:34d8247a default
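
For context before the diff: the new 'dotencode' repository requirement extends the fncache store encoding so that a period or space at the start of a filename is escaped with the store's usual ~xx hex scheme ('.' becomes ~2e, ' ' becomes ~20), since such names are fragile on some filesystems (for example Mac OS X's ._* AppleDouble files). A minimal sketch of the idea follows, using a hypothetical helper name _dotencode rather than the real mercurial/store.py code:

    def _dotencode(path):
        # Sketch only: escape a leading '.' or ' ' in each path component,
        # roughly what the 'dotencode' format does for store filenames.
        def encode(part):
            if part.startswith('.'):
                return '~2e' + part[1:]   # 0x2e == '.'
            if part.startswith(' '):
                return '~20' + part[1:]   # 0x20 == ' '
            return part
        return '/'.join(encode(p) for p in path.split('/'))

    print _dotencode('data/.hidden.d/ name.i')  # data/~2ehidden.d/~20name.i

The diff itself only wires the requirement in: repositories created with format.dotencode enabled (the default) record 'dotencode' in .hg/requires, and the supported set is extended so such repositories can be opened.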
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,1890 +1,1893 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import repo, changegroup, subrepo, discovery, pushkey
 import changelog, dirstate, filelog, manifest, context
 import lock, transaction, store, encoding
 import util, extensions, hook, error
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 import url as urlmod
 from lock import release
 import weakref, errno, os, time, inspect
 propertycache = util.propertycache
 
 class localrepository(repo.repository):
     capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
     supportedformats = set(('revlogv1', 'parentdelta'))
-    supported = supportedformats | set(('store', 'fncache', 'shared'))
+    supported = supportedformats | set(('store', 'fncache', 'shared',
+                                        'dotencode'))
 
     def __init__(self, baseui, path=None, create=0):
         repo.repository.__init__(self)
         self.root = os.path.realpath(util.expandpath(path))
         self.path = os.path.join(self.root, ".hg")
         self.origroot = path
         self.auditor = util.path_auditor(self.root, self._checknested)
         self.opener = util.opener(self.path)
         self.wopener = util.opener(self.root)
         self.baseui = baseui
         self.ui = baseui.copy()
 
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass
 
         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     util.makedirs(path)
                 os.mkdir(self.path)
                 requirements = ["revlogv1"]
                 if self.ui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
+                        if self.ui.configbool('format', 'dotencode', True):
+                            requirements.append('dotencode')
                     # create an invalid changelog
                     self.opener("00changelog.i", "a").write(
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
                 if self.ui.configbool('format', 'parentdelta', False):
                     requirements.append("parentdelta")
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             # find requirements
             requirements = set()
             try:
                 requirements = set(self.opener("requires").read().splitlines())
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
             for r in requirements - self.supported:
                 raise error.RepoError(_("requirement '%s' not supported") % r)
 
         self.sharedpath = self.path
         try:
             s = os.path.realpath(self.opener("sharedpath").read())
             if not os.path.exists(s):
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise
 
         self.store = store.store(requirements, self.sharedpath, util.opener)
         self.spath = self.store.path
         self.sopener = self.store.opener
         self.sjoin = self.store.join
         self.opener.createmode = self.store.createmode
         self._applyrequirements(requirements)
         if create:
             self._writerequirements()
 
         # These two define the set of tags for this repository. _tags
         # maps tag name to node; _tagtypes maps tag name to 'global' or
         # 'local'. (Global tags are defined by .hgtags across all
         # heads, and local tags are defined in .hg/localtags.) They
         # constitute the in-memory cache of tags.
         self._tags = None
         self._tagtypes = None
 
         self._branchcache = None # in UTF-8
         self._branchcachetip = None
         self.nodetagscache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
 
     def _applyrequirements(self, requirements):
         self.requirements = requirements
         self.sopener.options = {}
         if 'parentdelta' in requirements:
             self.sopener.options['parentdelta'] = 1
 
     def _writerequirements(self):
         reqfile = self.opener("requires", "w")
         for r in self.requirements:
             reqfile.write("%s\n" % r)
         reqfile.close()
 
     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
 
         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         #   $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = os.sep.join(parts)
             if prefix in ctx.substate:
                 if prefix == subpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False
 
 
     @propertycache
     def changelog(self):
         c = changelog.changelog(self.sopener)
         if 'HG_PENDING' in os.environ:
             p = os.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         self.sopener.options['defversion'] = c.version
         return c
 
     @propertycache
     def manifest(self):
         return manifest.manifest(self.sopener)
 
     @propertycache
     def dirstate(self):
         return dirstate.dirstate(self.opener, self.ui, self.root)
 
     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         return context.changectx(self, changeid)
 
     def __contains__(self, changeid):
         try:
             return bool(self.lookup(changeid))
         except error.RepoLookupError:
             return False
 
     def __nonzero__(self):
         return True
 
     def __len__(self):
         return len(self.changelog)
 
     def __iter__(self):
         for i in xrange(len(self)):
             yield i
 
     def url(self):
         return 'file:' + self.root
 
     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)
 
     tag_disallowed = ':\r\n'
 
     def _tag(self, names, node, message, local, user, date, extra={}):
         if isinstance(names, str):
             allchars = names
             names = (names,)
         else:
             allchars = ''.join(names)
         for c in self.tag_disallowed:
             if c in allchars:
                 raise util.Abort(_('%r cannot be used in a tag name') % c)
 
         branches = self.branchmap()
         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)
             if name in branches:
                 self.ui.warn(_("warning: tag %s conflicts with existing"
                                " branch name\n") % name)
 
         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 m = munge and munge(name) or name
                 if self._tagtypes and name in self._tagtypes:
                     old = self._tags.get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()
 
         prevtags = ''
         if local:
             try:
                 fp = self.opener('localtags', 'r+')
             except IOError:
                 fp = self.opener('localtags', 'a')
             else:
                 prevtags = fp.read()
 
             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return
 
         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError:
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()
 
         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)
 
         if '.hgtags' not in self.dirstate:
             self[None].add(['.hgtags'])
 
         m = matchmod.exact(self.root, '', ['.hgtags'])
         tagnode = self.commit(message, user, date, extra=extra, match=m)
 
         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)
 
         return tagnode
 
     def tag(self, names, node, message, local, user, date):
         '''tag a revision with one or more symbolic names.
 
         names is a list of strings or, when adding a single tag, names may be a
         string.
 
         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.
 
         keyword arguments:
 
         local: whether to store tags in non-version-controlled file
         (default False)
 
         message: commit message to use if committing
 
         user: name of user to use if committing
 
         date: date tuple to use if committing'''
 
         for x in self.status()[:5]:
             if '.hgtags' in x:
                 raise util.Abort(_('working copy of .hgtags is changed '
                                    '(please commit .hgtags manually)'))
 
         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date)
 
     def tags(self):
         '''return a mapping of tag to node'''
         if self._tags is None:
             (self._tags, self._tagtypes) = self._findtags()
 
         return self._tags
 
     def _findtags(self):
         '''Do the hard work of finding tags. Return a pair of dicts
         (tags, tagtypes) where tags maps tag name to node, and tagtypes
         maps tag name to a string like \'global\' or \'local\'.
         Subclasses or extensions are free to add their own tags, but
         should be aware that the returned dicts will be retained for the
         duration of the localrepo object.'''
 
         # XXX what tagtype should subclasses/extensions use? Currently
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type? Should there
         # be one tagtype for all such "virtual" tags? Or is the status
         # quo fine?
 
         alltags = {} # map tag name to (node, hist)
         tagtypes = {}
 
         tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
 
         # Build the return dicts. Have to re-encode tag names because
         # the tags module always uses UTF-8 (in order not to lose info
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
         tags['tip'] = self.changelog.tip()
         tagtypes = dict([(encoding.tolocal(name), value)
                          for (name, value) in tagtypes.iteritems()])
         return (tags, tagtypes)
 
     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:
 
         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''
 
         self.tags()
 
         return self._tagtypes.get(tagname)
 
     def tagslist(self):
         '''return a list of tags ordered by revision'''
         l = []
         for t, n in self.tags().iteritems():
             try:
                 r = self.changelog.rev(n)
             except:
                 r = -2 # sort to the beginning of the list if unknown
             l.append((r, t, n))
         return [(t, n) for r, t, n in sorted(l)]
 
     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self.nodetagscache:
             self.nodetagscache = {}
             for t, n in self.tags().iteritems():
                 self.nodetagscache.setdefault(n, []).append(t)
             for tags in self.nodetagscache.itervalues():
                 tags.sort()
         return self.nodetagscache.get(node, [])
 
     def _branchtags(self, partial, lrev):
         # TODO: rename this function?
         tiprev = len(self) - 1
         if lrev != tiprev:
             ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
             self._updatebranchcache(partial, ctxgen)
             self._writebranchcache(partial, self.changelog.tip(), tiprev)
 
         return partial
 
     def updatebranchcache(self):
         tip = self.changelog.tip()
         if self._branchcache is not None and self._branchcachetip == tip:
             return self._branchcache
 
         oldtip = self._branchcachetip
         self._branchcachetip = tip
         if oldtip is None or oldtip not in self.changelog.nodemap:
             partial, last, lrev = self._readbranchcache()
         else:
             lrev = self.changelog.rev(oldtip)
             partial = self._branchcache
 
         self._branchtags(partial, lrev)
         # this private cache holds all heads (not just tips)
         self._branchcache = partial
 
     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]}'''
         self.updatebranchcache()
         return self._branchcache
 
     def branchtags(self):
         '''return a dict where branch names map to the tipmost head of
         the branch, open heads come before closed'''
         bt = {}
         for bn, heads in self.branchmap().iteritems():
             tip = heads[-1]
             for h in reversed(heads):
                 if 'close' not in self.changelog.read(h)[5]:
                     tip = h
                     break
             bt[bn] = tip
         return bt
 
 
     def _readbranchcache(self):
         partial = {}
         try:
             f = self.opener("branchheads.cache")
             lines = f.read().split('\n')
             f.close()
         except (IOError, OSError):
             return {}, nullid, nullrev
 
         try:
             last, lrev = lines.pop(0).split(" ", 1)
             last, lrev = bin(last), int(lrev)
             if lrev >= len(self) or self[lrev].node() != last:
                 # invalidate the cache
                 raise ValueError('invalidating branch cache (tip differs)')
             for l in lines:
                 if not l:
                     continue
                 node, label = l.split(" ", 1)
                 partial.setdefault(label.strip(), []).append(bin(node))
         except KeyboardInterrupt:
             raise
         except Exception, inst:
             if self.ui.debugflag:
                 self.ui.warn(str(inst), '\n')
             partial, last, lrev = {}, nullid, nullrev
         return partial, last, lrev
 
     def _writebranchcache(self, branches, tip, tiprev):
         try:
             f = self.opener("branchheads.cache", "w", atomictemp=True)
             f.write("%s %s\n" % (hex(tip), tiprev))
             for label, nodes in branches.iteritems():
                 for node in nodes:
                     f.write("%s %s\n" % (hex(node), label))
             f.rename()
         except (IOError, OSError):
             pass
 
     def _updatebranchcache(self, partial, ctxgen):
         # collect new branch entries
         newbranches = {}
         for c in ctxgen:
             newbranches.setdefault(c.branch(), []).append(c.node())
         # if older branchheads are reachable from new ones, they aren't
         # really branchheads. Note checking parents is insufficient:
         # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
         for branch, newnodes in newbranches.iteritems():
             bheads = partial.setdefault(branch, [])
             bheads.extend(newnodes)
             if len(bheads) <= 1:
                 continue
             # starting from tip means fewer passes over reachable
             while newnodes:
                 latest = newnodes.pop()
                 if latest not in bheads:
                     continue
                 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                 reachable = self.changelog.reachable(latest, minbhrev)
                 reachable.remove(latest)
                 bheads = [b for b in bheads if b not in reachable]
             partial[branch] = bheads
 
     def lookup(self, key):
         if isinstance(key, int):
             return self.changelog.node(key)
         elif key == '.':
             return self.dirstate.parents()[0]
         elif key == 'null':
             return nullid
         elif key == 'tip':
             return self.changelog.tip()
         n = self.changelog._match(key)
         if n:
             return n
         if key in self.tags():
             return self.tags()[key]
         if key in self.branchtags():
             return self.branchtags()[key]
         n = self.changelog._partialmatch(key)
         if n:
             return n
 
         # can't find key, check if it might have come from damaged dirstate
         if key in self.dirstate.parents():
             raise error.Abort(_("working directory has unknown parent '%s'!")
                               % short(key))
         try:
             if len(key) == 20:
                 key = hex(key)
         except:
             pass
         raise error.RepoLookupError(_("unknown revision '%s'") % key)
 
     def lookupbranch(self, key, remote=None):
         repo = remote or self
         if key in repo.branchmap():
             return key
 
         repo = (remote and remote.local()) and remote or self
         return repo[key].branch()
 
     def local(self):
         return True
 
     def join(self, f):
         return os.path.join(self.path, f)
 
     def wjoin(self, f):
         return os.path.join(self.root, f)
 
     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.sopener, f)
 
     def changectx(self, changeid):
         return self[changeid]
 
     def parents(self, changeid=None):
         '''get list of changectxs for parents of changeid'''
         return self[changeid].parents()
 
     def filectx(self, path, changeid=None, fileid=None):
         """changeid can be a changeset revision, node, or tag.
         fileid can be a file revision or node."""
         return context.filectx(self, path, changeid, fileid)
 
     def getcwd(self):
         return self.dirstate.getcwd()
 
     def pathto(self, f, cwd=None):
         return self.dirstate.pathto(f, cwd)
 
     def wfile(self, f, mode='r'):
         return self.wopener(f, mode)
 
     def _link(self, f):
         return os.path.islink(self.wjoin(f))
 
     def _loadfilter(self, filter):
         if filter not in self.filterpats:
             l = []
             for pat, cmd in self.ui.configitems(filter):
                 if cmd == '!':
                     continue
                 mf = matchmod.match(self.root, '', [pat])
                 fn = None
                 params = cmd
                 for name, filterfn in self._datafilters.iteritems():
                     if cmd.startswith(name):
                         fn = filterfn
                         params = cmd[len(name):].lstrip()
                         break
                 if not fn:
                     fn = lambda s, c, **kwargs: util.filter(s, c)
                 # Wrap old filters not supporting keyword arguments
                 if not inspect.getargspec(fn)[2]:
                     oldfn = fn
                     fn = lambda s, c, **kwargs: oldfn(s, c)
                 l.append((mf, fn, params))
             self.filterpats[filter] = l
 
     def _filter(self, filter, filename, data):
         self._loadfilter(filter)
 
         for mf, fn, cmd in self.filterpats[filter]:
             if mf(filename):
                 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                 break
 
         return data
 
     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter
 
     def wread(self, filename):
         if self._link(filename):
             data = os.readlink(self.wjoin(filename))
         else:
             data = self.wopener(filename, 'r').read()
         return self._filter("encode", filename, data)
 
     def wwrite(self, filename, data, flags):
         data = self._filter("decode", filename, data)
         try:
             os.unlink(self.wjoin(filename))
         except OSError:
             pass
         if 'l' in flags:
             self.wopener.symlink(data, filename)
         else:
             self.wopener(filename, 'w').write(data)
             if 'x' in flags:
                 util.set_flags(self.wjoin(filename), False, True)
 
     def wwritedata(self, filename, data):
         return self._filter("decode", filename, data)
 
     def transaction(self, desc):
         tr = self._transref and self._transref() or None
         if tr and tr.running():
             return tr.nest()
 
         # abort here if the journal already exists
         if os.path.exists(self.sjoin("journal")):
             raise error.RepoError(
                 _("abandoned transaction found - run hg recover"))
 
         # save dirstate for rollback
         try:
             ds = self.opener("dirstate").read()
         except IOError:
             ds = ""
         self.opener("journal.dirstate", "w").write(ds)
         self.opener("journal.branch", "w").write(self.dirstate.branch())
         self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
 
         renames = [(self.sjoin("journal"), self.sjoin("undo")),
                    (self.join("journal.dirstate"), self.join("undo.dirstate")),
                    (self.join("journal.branch"), self.join("undo.branch")),
                    (self.join("journal.desc"), self.join("undo.desc"))]
         tr = transaction.transaction(self.ui.warn, self.sopener,
                                      self.sjoin("journal"),
                                      aftertrans(renames),
                                      self.store.createmode)
         self._transref = weakref.ref(tr)
         return tr
 
     def recover(self):
         lock = self.lock()
         try:
             if os.path.exists(self.sjoin("journal")):
                 self.ui.status(_("rolling back interrupted transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("journal"),
                                      self.ui.warn)
                 self.invalidate()
                 return True
             else:
                 self.ui.warn(_("no interrupted transaction available\n"))
                 return False
         finally:
             lock.release()
 
     def rollback(self, dryrun=False):
         wlock = lock = None
         try:
             wlock = self.wlock()
             lock = self.lock()
             if os.path.exists(self.sjoin("undo")):
                 try:
                     args = self.opener("undo.desc", "r").read().splitlines()
                     if len(args) >= 3 and self.ui.verbose:
                         desc = _("rolling back to revision %s"
                                  " (undo %s: %s)\n") % (
                                  int(args[0]) - 1, args[1], args[2])
                     elif len(args) >= 2:
                         desc = _("rolling back to revision %s (undo %s)\n") % (
                                int(args[0]) - 1, args[1])
                 except IOError:
                     desc = _("rolling back unknown transaction\n")
                 self.ui.status(desc)
                 if dryrun:
                     return
                 transaction.rollback(self.sopener, self.sjoin("undo"),
                                      self.ui.warn)
                 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                 try:
                     branch = self.opener("undo.branch").read()
                     self.dirstate.setbranch(branch)
                 except IOError:
                     self.ui.warn(_("Named branch could not be reset, "
                                    "current branch still is: %s\n")
                                  % encoding.tolocal(self.dirstate.branch()))
                 self.invalidate()
                 self.dirstate.invalidate()
                 self.destroyed()
             else:
                 self.ui.warn(_("no rollback information available\n"))
                 return 1
         finally:
             release(lock, wlock)
 
     def invalidatecaches(self):
         self._tags = None
         self._tagtypes = None
         self.nodetagscache = None
         self._branchcache = None # in UTF-8
         self._branchcachetip = None
 
     def invalidate(self):
         for a in "changelog manifest".split():
             if a in self.__dict__:
                 delattr(self, a)
         self.invalidatecaches()
 
     def _lock(self, lockname, wait, releasefn, acquirefn, desc):
         try:
             l = lock.lock(lockname, 0, releasefn, desc=desc)
         except error.LockHeld, inst:
             if not wait:
                 raise
             self.ui.warn(_("waiting for lock on %s held by %r\n") %
                          (desc, inst.locker))
             # default to 600 seconds timeout
             l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                           releasefn, desc=desc)
         if acquirefn:
             acquirefn()
         return l
 
     def lock(self, wait=True):
         '''Lock the repository store (.hg/store) and return a weak reference
         to the lock. Use this before modifying the store (e.g. committing or
         stripping). If you are opening a transaction, get a lock as well.)'''
         l = self._lockref and self._lockref()
         if l is not None and l.held:
             l.lock()
             return l
 
         l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                        _('repository %s') % self.origroot)
         self._lockref = weakref.ref(l)
         return l
 
     def wlock(self, wait=True):
         '''Lock the non-store parts of the repository (everything under
         .hg except .hg/store) and return a weak reference to the lock.
         Use this before modifying files in .hg.'''
         l = self._wlockref and self._wlockref()
         if l is not None and l.held:
             l.lock()
             return l
 
         l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                        self.dirstate.invalidate, _('working directory of %s') %
                        self.origroot)
         self._wlockref = weakref.ref(l)
         return l
 
     def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
         """
         commit an individual file as part of a larger transaction
         """
 
         fname = fctx.path()
         text = fctx.data()
         flog = self.file(fname)
         fparent1 = manifest1.get(fname, nullid)
         fparent2 = fparent2o = manifest2.get(fname, nullid)
 
         meta = {}
         copy = fctx.renamed()
         if copy and copy[0] != fname:
             # Mark the new revision of this file as a copy of another
             # file. This copy data will effectively act as a parent
             # of this new revision. If this is a merge, the first
             # parent will be the nullid (meaning "look up the copy data")
             # and the second one will be the other parent. For example:
             #
             # 0 --- 1 --- 3   rev1 changes file foo
             #   \       /     rev2 renames foo to bar and changes it
             #    \- 2 -/      rev3 should have bar with all changes and
             #                      should record that bar descends from
             #                      bar in rev2 and foo in rev1
             #
             # this allows this merge to succeed:
             #
             # 0 --- 1 --- 3   rev4 reverts the content change from rev2
             #  \       /      merging rev3 and rev4 should use bar@rev2
             #   \- 2 --- 4    as the merge base
             #
 
             cfname = copy[0]
             crev = manifest1.get(cfname)
             newfparent = fparent2
 
             if manifest2: # branch merge
                 if fparent2 == nullid or crev is None: # copied on remote side
                     if cfname in manifest2:
                         crev = manifest2[cfname]
                         newfparent = fparent1
 
             # find source in nearest ancestor if we've lost track
             if not crev:
                 self.ui.debug(" %s: searching for copy revision for %s\n" %
                               (fname, cfname))
                 for ancestor in self['.'].ancestors():
                     if cfname in ancestor:
                         crev = ancestor[cfname].filenode()
                         break
 
             self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
             meta["copy"] = cfname
             meta["copyrev"] = hex(crev)
             fparent1, fparent2 = nullid, newfparent
         elif fparent2 != nullid:
             # is one parent an ancestor of the other?
             fparentancestor = flog.ancestor(fparent1, fparent2)
             if fparentancestor == fparent1:
                 fparent1, fparent2 = fparent2, nullid
             elif fparentancestor == fparent2:
                 fparent2 = nullid
 
         # is the file changed?
         if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
             changelist.append(fname)
             return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
 
         # are just the flags changed during merge?
         if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
             changelist.append(fname)
 
         return fparent1
 
     def commit(self, text="", user=None, date=None, match=None, force=False,
                editor=False, extra={}):
         """Add a new revision to current repository.
 
         Revision information is gathered from the working directory,
         match can be used to filter the committed files. If editor is
         supplied, it is called to get a commit message.
         """
 
         def fail(f, msg):
             raise util.Abort('%s: %s' % (f, msg))
 
         if not match:
             match = matchmod.always(self.root, '')
 
         if not force:
             vdirs = []
             match.dir = vdirs.append
             match.bad = fail
 
         wlock = self.wlock()
         try:
             wctx = self[None]
             merge = len(wctx.parents()) > 1
 
             if (not force and merge and match and
                 (match.files() or match.anypats())):
                 raise util.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))
 
             changes = self.status(match=match, clean=force)
             if force:
                 changes[0].extend(changes[6]) # mq may commit unchanged files
 
             # check subrepos
             subs = []
             removedsubs = set()
             for p in wctx.parents():
                 removedsubs.update(s for s in p.substate if match(s))
             for s in wctx.substate:
                 removedsubs.discard(s)
                 if match(s) and wctx.sub(s).dirty():
                     subs.append(s)
             if (subs or removedsubs):
                 if (not match('.hgsub') and
                     '.hgsub' in (wctx.modified() + wctx.added())):
                     raise util.Abort(_("can't commit subrepos without .hgsub"))
                 if '.hgsubstate' not in changes[0]:
                     changes[0].insert(0, '.hgsubstate')
 
             # make sure all explicit patterns are matched
             if not force and match.files():
                 matched = set(changes[0] + changes[1] + changes[2])
 
                 for f in match.files():
                     if f == '.' or f in matched or f in wctx.substate:
                         continue
                     if f in changes[3]: # missing
                         fail(f, _('file not found!'))
                     if f in vdirs: # visited directory
                         d = f + '/'
                         for mf in matched:
                             if mf.startswith(d):
                                 break
                         else:
                             fail(f, _("no match under directory!"))
                     elif f not in self.dirstate:
                         fail(f, _("file not tracked!"))
 
             if (not force and not extra.get("close") and not merge
                 and not (changes[0] or changes[1] or changes[2])
                 and wctx.branch() == wctx.p1().branch()):
                 return None
 
             ms = mergemod.mergestate(self)
             for f in changes[0]:
                 if f in ms and ms[f] == 'u':
                     raise util.Abort(_("unresolved merge conflicts "
                                        "(see hg resolve)"))
 
             cctx = context.workingctx(self, text, user, date, extra, changes)
             if editor:
                 cctx._text = editor(self, cctx, subs)
             edited = (text != cctx._text)
 
             # commit subs
             if subs or removedsubs:
                 state = wctx.substate.copy()
                 for s in sorted(subs):
                     sub = wctx.sub(s)
                     self.ui.status(_('committing subrepository %s\n') %
                                    subrepo.relpath(sub))
                     sr = sub.commit(cctx._text, user, date)
                     state[s] = (state[s][0], sr)
                 subrepo.writestate(self, state)
 
             # Save commit message in case this transaction gets rolled back
             # (e.g. by a pretxncommit hook). Leave the content alone on
             # the assumption that the user will use the same editor again.
             msgfile = self.opener('last-message.txt', 'wb')
             msgfile.write(cctx._text)
             msgfile.close()
 
             p1, p2 = self.dirstate.parents()
             hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
             try:
                 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                 ret = self.commitctx(cctx, True)
             except:
                 if edited:
                     msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                     self.ui.write(
                         _('note: commit message saved in %s\n') % msgfn)
                 raise
 
             # update dirstate and mergestate
             for f in changes[0] + changes[1]:
                 self.dirstate.normal(f)
             for f in changes[2]:
                 self.dirstate.forget(f)
             self.dirstate.setparents(ret)
             ms.reset()
         finally:
             wlock.release()
 
         self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
         return ret
 
     def commitctx(self, ctx, error=False):
971 def commitctx(self, ctx, error=False):
969 """Add a new revision to current repository.
972 """Add a new revision to current repository.
970 Revision information is passed via the context argument.
973 Revision information is passed via the context argument.
971 """
974 """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
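        # Illustrative sketch (added comment, not part of the original
        # source): fetching the open heads of the default branch, newest
        # first, might look like:
        #
        #   heads = repo.branchheads(branch='default', closed=False)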
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
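        # Descriptive note (added comment, not part of the original source):
        # for each starting node this follows first parents until it reaches
        # a merge or the root, then records (start, stop, stop_p1, stop_p2);
        # this traversal backs the legacy 'branches' wire command used by
        # old-style discovery.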
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
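        # Descriptive note (added comment, not part of the original source):
        # for each (top, bottom) pair this walks first parents from top
        # towards bottom, sampling the nodes seen at exponentially growing
        # distances 1, 2, 4, 8, ... so old-style discovery can bisect the
        # range without transferring every node.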
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def pull(self, remote, heads=None, force=False):
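        # Descriptive note (added comment, not part of the original source):
        # returns 0 when the remote has nothing we lack; otherwise it
        # returns whatever addchangegroup() returns for the fetched
        # changegroup (see its docstring below).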
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
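        # Illustrative sketch (added comment, not part of the original
        # source): a caller that only distinguishes success from failure
        # might do:
        #
        #   r = repo.push(remote, force=False, revs=None)
        #   if r == 0:
        #       pass # HTTP error or nothing to push
        #
        # Values other than 0 and 1 follow the addchangegroup() convention
        # documented further down.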
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            ret = discovery.prepush(self, remote, force, revs, newbranch)
            if ret[0] is None:
                # and here we return 0 for "nothing to push" or 1 for
                # "something to push but I refuse"
                return ret[1]

            cg, remote_heads = ret
            if unbundle:
                # local repo finds heads on server, finds out what revs it must
                # push. once revs transferred, if server finds it has
                # different heads (someone else won commit/push race), server
                # aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                return remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
        finally:
            if lock is not None:
                lock.release()

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if mnfst.deltaparent(r) in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling changes'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
1637 def csmap(x):
1640 def csmap(x):
1638 self.ui.debug("add changeset %s\n" % short(x))
1641 self.ui.debug("add changeset %s\n" % short(x))
1639 return len(cl)
1642 return len(cl)
1640
1643
1641 def revmap(x):
1644 def revmap(x):
1642 return cl.rev(x)
1645 return cl.rev(x)
1643
1646
1644 if not source:
1647 if not source:
1645 return 0
1648 return 0
1646
1649
1647 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1650 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1648
1651
1649 changesets = files = revisions = 0
1652 changesets = files = revisions = 0
1650 efiles = set()
1653 efiles = set()
1651
1654
1652 # write changelog data to temp files so concurrent readers will not see
1655 # write changelog data to temp files so concurrent readers will not see
1653 # inconsistent view
1656 # inconsistent view
1654 cl = self.changelog
1657 cl = self.changelog
1655 cl.delayupdate()
1658 cl.delayupdate()
1656 oldheads = len(cl.heads())
1659 oldheads = len(cl.heads())
1657
1660
1658 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1661 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1659 try:
1662 try:
1660 trp = weakref.proxy(tr)
1663 trp = weakref.proxy(tr)
1661 # pull off the changeset group
1664 # pull off the changeset group
1662 self.ui.status(_("adding changesets\n"))
1665 self.ui.status(_("adding changesets\n"))
1663 clstart = len(cl)
1666 clstart = len(cl)
1664 class prog(object):
1667 class prog(object):
1665 step = _('changesets')
1668 step = _('changesets')
1666 count = 1
1669 count = 1
1667 ui = self.ui
1670 ui = self.ui
1668 total = None
1671 total = None
1669 def __call__(self):
1672 def __call__(self):
1670 self.ui.progress(self.step, self.count, unit=_('chunks'),
1673 self.ui.progress(self.step, self.count, unit=_('chunks'),
1671 total=self.total)
1674 total=self.total)
1672 self.count += 1
1675 self.count += 1
1673 pr = prog()
1676 pr = prog()
1674 source.callback = pr
1677 source.callback = pr
1675
1678
1676 if (cl.addgroup(source, csmap, trp) is None
1679 if (cl.addgroup(source, csmap, trp) is None
1677 and not emptyok):
1680 and not emptyok):
1678 raise util.Abort(_("received changelog group is empty"))
1681 raise util.Abort(_("received changelog group is empty"))
1679 clend = len(cl)
1682 clend = len(cl)
1680 changesets = clend - clstart
1683 changesets = clend - clstart
1681 for c in xrange(clstart, clend):
1684 for c in xrange(clstart, clend):
1682 efiles.update(self[c].files())
1685 efiles.update(self[c].files())
1683 efiles = len(efiles)
1686 efiles = len(efiles)
1684 self.ui.progress(_('changesets'), None)
1687 self.ui.progress(_('changesets'), None)
1685
1688
1686 # pull off the manifest group
1689 # pull off the manifest group
1687 self.ui.status(_("adding manifests\n"))
1690 self.ui.status(_("adding manifests\n"))
1688 pr.step = _('manifests')
1691 pr.step = _('manifests')
1689 pr.count = 1
1692 pr.count = 1
1690 pr.total = changesets # manifests <= changesets
1693 pr.total = changesets # manifests <= changesets
1691 # no need to check for empty manifest group here:
1694 # no need to check for empty manifest group here:
1692 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1695 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1693 # no new manifest will be created and the manifest group will
1696 # no new manifest will be created and the manifest group will
1694 # be empty during the pull
1697 # be empty during the pull
1695 self.manifest.addgroup(source, revmap, trp)
1698 self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while 1:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


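The server.validate pass above is plain set bookkeeping: every filenode promised by an incoming manifest is recorded, then checked off as the corresponding filelog revisions arrive, and anything left over aborts with "missing file data". A self-contained sketch of the pattern, with made-up file and node names:

    # Sketch of the needfiles bookkeeping used by addchangegroup above.
    manifests = [                             # made-up example data
        {'a.txt': 'n1', 'b.txt': 'n2'},       # filenodes promised by cset 1
        {'a.txt': 'n3'},                      # filenodes promised by cset 2
    ]
    needfiles = {}
    for mfest in manifests:
        for f, n in mfest.items():
            needfiles.setdefault(f, set()).add(n)

    arrived = [('a.txt', 'n1'), ('a.txt', 'n3'), ('b.txt', 'n2')]
    for f, n in arrived:                      # check off revisions as they arrive
        needs = needfiles.get(f)
        if needs and n in needs:
            needs.remove(n)
            if not needs:
                del needfiles[f]

    assert not needfiles, 'missing file data - run hg verify'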
    def stream_in(self, remote, requirements):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements + new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        self.invalidate()
        return len(self.heads()) + 1

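For reference, the stream_out format consumed here is line-oriented: a status-code line ('0' on success), a '<file count> <byte count>' line, then for each file a '<store path>\0<size>' header followed by exactly <size> raw bytes. A toy parser under those assumptions (just the framing, not Mercurial API):

    import io

    def parse_stream(fp, write_file):
        # status code line: 0 = ok, 1 = forbidden, 2 = lock failed (cf. stream_in)
        resp = int(fp.readline().decode())
        if resp != 0:
            raise RuntimeError('server refused stream: %d' % resp)
        # '<file count> <byte count>' line
        total_files, total_bytes = map(int, fp.readline().decode().split(' ', 1))
        for _ in range(total_files):
            # per-file header: '<store path>\0<size>\n', then <size> raw bytes
            name, size = fp.readline().decode().split('\0', 1)
            write_file(name, fp.read(int(size)))
        return total_files, total_bytes

    # toy round trip with an in-memory stream
    raw = b'0\n1 5\ndata/foo.i\x005\nhello'
    files = {}
    parse_stream(io.BytesIO(raw), lambda n, d: files.setdefault(n, d))
    assert files == {'data/foo.i': b'hello'}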
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
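The streaming negotiation in clone() is simple set arithmetic: stream only if every format the server requires is one this client supports. For example ('futurefmt' is a hypothetical requirement name):

    # formats this client understands (cf. localrepository.supportedformats)
    supportedformats = set(['revlogv1', 'parentdelta'])

    # a server advertising 'streamreqs=revlogv1' is acceptable...
    assert not set('revlogv1'.split(',')) - supportedformats

    # ...but one requiring a format we do not know forces a regular pull
    assert set('revlogv1,futurefmt'.split(',')) - supportedformats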
@@ -1,333 +1,338 @@
 # store.py - repository store handling for Mercurial
 #
 # Copyright 2008 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from i18n import _
 import osutil, util
 import os, stat

 _sha = util.sha1

 # This avoids a collision between a file named foo and a dir named
 # foo.i or foo.d
 def encodedir(path):
     if not path.startswith('data/'):
         return path
     return (path
             .replace(".hg/", ".hg.hg/")
             .replace(".i/", ".i.hg/")
             .replace(".d/", ".d.hg/"))

 def decodedir(path):
     if not path.startswith('data/') or ".hg/" not in path:
         return path
     return (path
             .replace(".d.hg/", ".d/")
             .replace(".i.hg/", ".i/")
             .replace(".hg.hg/", ".hg/"))

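A quick round trip of the directory escaping above (the path itself is a made-up example):

    # a directory literally named 'foo.i' would collide with the revlog
    # index 'foo.i' of a file named 'foo', so the directory gets escaped
    p = 'data/foo.i/bar.i'
    enc = (p.replace(".hg/", ".hg.hg/")
            .replace(".i/", ".i.hg/")
            .replace(".d/", ".d.hg/"))
    assert enc == 'data/foo.i.hg/bar.i'
    dec = (enc.replace(".d.hg/", ".d/")
              .replace(".i.hg/", ".i/")
              .replace(".hg.hg/", ".hg/"))
    assert dec == p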
 def _buildencodefun():
     e = '_'
     win_reserved = [ord(x) for x in '\\:*?"<>|']
     cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
     for x in (range(32) + range(126, 256) + win_reserved):
         cmap[chr(x)] = "~%02x" % x
     for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
         cmap[chr(x)] = e + chr(x).lower()
     dmap = {}
     for k, v in cmap.iteritems():
         dmap[v] = k
     def decode(s):
         i = 0
         while i < len(s):
             for l in xrange(1, 4):
                 try:
                     yield dmap[s[i:i + l]]
                     i += l
                     break
                 except KeyError:
                     pass
             else:
                 raise KeyError
     return (lambda s: "".join([cmap[c] for c in encodedir(s)]),
             lambda s: decodedir("".join(list(decode(s)))))

 encodefilename, decodefilename = _buildencodefun()

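Concretely, the resulting encodefilename/decodefilename pair gives the reversible escaping used by the store: uppercase letters become '_' plus the lowercase letter, '_' doubles itself, and control, non-ASCII, and Windows-reserved bytes become '~xx' hex escapes. A few checks, assuming the mercurial package from this change is importable (as in the test script below):

    from mercurial import store

    # uppercase -> underscore escaping (reversible)
    assert store.encodefilename('data/PRN') == 'data/_p_r_n'
    # '_' escapes itself
    assert store.encodefilename('data/_x') == 'data/__x'
    # reserved characters become ~xx hex codes (':' is 0x3a)
    assert store.encodefilename('data/a:b') == 'data/a~3ab'
    # and the mapping is invertible
    assert store.decodefilename('data/a~3ab') == 'data/a:b'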
 def _build_lower_encodefun():
     win_reserved = [ord(x) for x in '\\:*?"<>|']
     cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
     for x in (range(32) + range(126, 256) + win_reserved):
         cmap[chr(x)] = "~%02x" % x
     for x in range(ord("A"), ord("Z")+1):
         cmap[chr(x)] = chr(x).lower()
     return lambda s: "".join([cmap[c] for c in s])

 lowerencode = _build_lower_encodefun()

 _windows_reserved_filenames = '''con prn aux nul
     com1 com2 com3 com4 com5 com6 com7 com8 com9
     lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
-def auxencode(path):
+def _auxencode(path, dotencode):
     res = []
     for n in path.split('/'):
         if n:
             base = n.split('.')[0]
             if base and (base in _windows_reserved_filenames):
                 # encode third letter ('aux' -> 'au~78')
                 ec = "~%02x" % ord(n[2])
                 n = n[0:2] + ec + n[3:]
             if n[-1] in '. ':
                 # encode last period or space ('foo...' -> 'foo..~2e')
                 n = n[:-1] + "~%02x" % ord(n[-1])
+            if dotencode and n[0] in '. ':
+                n = "~%02x" % ord(n[0]) + n[1:]
             res.append(n)
     return '/'.join(res)

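The new dotencode parameter is the point of this change (issue1713): besides masking Windows-reserved basenames and trailing periods or spaces, _auxencode can now escape a leading period or space as well. Sample component transformations, consistent with the test expectations below (note that _auxencode sees the path without its 'data/' prefix, after encodefilename):

    from mercurial import store

    # reserved basename: third letter escaped ('x' is 0x78)
    assert store._auxencode('aux.bla', True) == 'au~78.bla'
    # trailing period escaped ('.' is 0x2e)
    assert store._auxencode('foo...', True) == 'foo..~2e'
    # new with dotencode=True: the leading period is escaped as well
    assert store._auxencode('.x..', True) == '~2ex.~2e'
    # without dotencode only the trailing period is touched (old behaviour)
    assert store._auxencode('.x..', False) == '.x.~2e'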
 MAX_PATH_LEN_IN_HGSTORE = 120
 DIR_PREFIX_LEN = 8
 _MAX_SHORTENED_DIRS_LEN = 8 * (DIR_PREFIX_LEN + 1) - 4
-def hybridencode(path):
+def _hybridencode(path, auxencode):
     '''encodes path with a length limit

     Encodes all paths that begin with 'data/', according to the following.

     Default encoding (reversible):

     Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
     characters are encoded as '~xx', where xx is the two digit hex code
     of the character (see encodefilename).
     Relevant path components consisting of Windows reserved filenames are
     masked by encoding the third character ('aux' -> 'au~78', see auxencode).

     Hashed encoding (not reversible):

     If the default-encoded path is longer than MAX_PATH_LEN_IN_HGSTORE, a
     non-reversible hybrid hashing of the path is done instead.
     This encoding uses up to DIR_PREFIX_LEN characters of all directory
     levels of the lowerencoded path, but not more levels than can fit into
     _MAX_SHORTENED_DIRS_LEN.
     Then follows the filler followed by the sha digest of the full path.
     The filler is the beginning of the basename of the lowerencoded path
     (the basename is everything after the last path separator). The filler
     is as long as possible, filling in characters from the basename until
     the encoded path has MAX_PATH_LEN_IN_HGSTORE characters (or all chars
     of the basename have been taken).
     The extension (e.g. '.i' or '.d') is preserved.

     The string 'data/' at the beginning is replaced with 'dh/', if the hashed
     encoding was used.
     '''
     if not path.startswith('data/'):
         return path
     # escape directories ending with .i and .d
     path = encodedir(path)
     ndpath = path[len('data/'):]
     res = 'data/' + auxencode(encodefilename(ndpath))
     if len(res) > MAX_PATH_LEN_IN_HGSTORE:
         digest = _sha(path).hexdigest()
         aep = auxencode(lowerencode(ndpath))
         _root, ext = os.path.splitext(aep)
         parts = aep.split('/')
         basename = parts[-1]
         sdirs = []
         for p in parts[:-1]:
             d = p[:DIR_PREFIX_LEN]
             if d[-1] in '. ':
                 # Windows can't access dirs ending in period or space
                 d = d[:-1] + '_'
             t = '/'.join(sdirs) + '/' + d
             if len(t) > _MAX_SHORTENED_DIRS_LEN:
                 break
             sdirs.append(d)
         dirs = '/'.join(sdirs)
         if len(dirs) > 0:
             dirs += '/'
         res = 'dh/' + dirs + digest + ext
         space_left = MAX_PATH_LEN_IN_HGSTORE - len(res)
         if space_left > 0:
             filler = basename[:space_left]
             res = 'dh/' + dirs + filler + digest + ext
     return res

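Two worked cases, both copied from this change's test expectations: a short path keeps the reversible 'data/' form, while a path whose encoded form exceeds MAX_PATH_LEN_IN_HGSTORE (120) switches to the one-way 'dh/' form built from 8-character directory prefixes, a basename filler, and the SHA-1 of the full path:

    from mercurial import store

    auxencode = lambda f: store._auxencode(f, True)
    enc = lambda f: store._hybridencode(f, auxencode)

    # short: reversible default encoding, stays under data/
    assert enc('data/aux.bla/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c.i') == \
        'data/au~78.bla/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i'

    # long: hashed 'dh/' encoding, 8-char dir prefixes + filler + sha1 + '.i'
    assert enc('data/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/'
               'TENTH/ELEVENTH/LOREMIPSUM.TXT.i') == \
        'dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/nineth/tenth/' \
        'loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i'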
 def _calcmode(path):
     try:
         # files in .hg/ will be created using this mode
         mode = os.stat(path).st_mode
         # avoid some useless chmods
         if (0777 & ~util.umask) == (0777 & mode):
             mode = None
     except OSError:
         mode = None
     return mode

 _data = 'data 00manifest.d 00manifest.i 00changelog.d 00changelog.i'

 class basicstore(object):
     '''base class for local repository stores'''
     def __init__(self, path, opener, pathjoiner):
         self.pathjoiner = pathjoiner
         self.path = path
         self.createmode = _calcmode(path)
         op = opener(self.path)
         op.createmode = self.createmode
         self.opener = lambda f, *args, **kw: op(encodedir(f), *args, **kw)

     def join(self, f):
         return self.pathjoiner(self.path, encodedir(f))

     def _walk(self, relpath, recurse):
         '''yields (unencoded, encoded, size)'''
         path = self.pathjoiner(self.path, relpath)
         striplen = len(self.path) + len(os.sep)
         l = []
         if os.path.isdir(path):
             visit = [path]
             while visit:
                 p = visit.pop()
                 for f, kind, st in osutil.listdir(p, stat=True):
                     fp = self.pathjoiner(p, f)
                     if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'):
                         n = util.pconvert(fp[striplen:])
                         l.append((decodedir(n), n, st.st_size))
                     elif kind == stat.S_IFDIR and recurse:
                         visit.append(fp)
         return sorted(l)

     def datafiles(self):
         return self._walk('data', True)

     def walk(self):
         '''yields (unencoded, encoded, size)'''
         # yield data files first
         for x in self.datafiles():
             yield x
         # yield manifest before changelog
         for x in reversed(self._walk('', False)):
             yield x

     def copylist(self):
         return ['requires'] + _data.split()

 class encodedstore(basicstore):
     def __init__(self, path, opener, pathjoiner):
         self.pathjoiner = pathjoiner
         self.path = self.pathjoiner(path, 'store')
         self.createmode = _calcmode(self.path)
         op = opener(self.path)
         op.createmode = self.createmode
         self.opener = lambda f, *args, **kw: op(encodefilename(f), *args, **kw)

     def datafiles(self):
         for a, b, size in self._walk('data', True):
             try:
                 a = decodefilename(a)
             except KeyError:
                 a = None
             yield a, b, size

     def join(self, f):
         return self.pathjoiner(self.path, encodefilename(f))

     def copylist(self):
         return (['requires', '00changelog.i'] +
                 [self.pathjoiner('store', f) for f in _data.split()])

 class fncache(object):
     # the filename used to be partially encoded
     # hence the encodedir/decodedir dance
     def __init__(self, opener):
         self.opener = opener
         self.entries = None

     def _load(self):
         '''fill the entries from the fncache file'''
         self.entries = set()
         try:
             fp = self.opener('fncache', mode='rb')
         except IOError:
             # skip nonexistent file
             return
         for n, line in enumerate(fp):
             if (len(line) < 2) or (line[-1] != '\n'):
                 t = _('invalid entry in fncache, line %s') % (n + 1)
                 raise util.Abort(t)
             self.entries.add(decodedir(line[:-1]))
         fp.close()

     def rewrite(self, files):
         fp = self.opener('fncache', mode='wb')
         for p in files:
             fp.write(encodedir(p) + '\n')
         fp.close()
         self.entries = set(files)

     def add(self, fn):
         if self.entries is None:
             self._load()
         if fn not in self.entries:
             self.opener('fncache', 'ab').write(encodedir(fn) + '\n')
             self.entries.add(fn)

     def __contains__(self, fn):
         if self.entries is None:
             self._load()
         return fn in self.entries

     def __iter__(self):
         if self.entries is None:
             self._load()
         return iter(self.entries)

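The fncache file format is deliberately trivial: one store path per line, newline-terminated, with only the encodedir escaping applied. A stand-alone reader/writer pair under that assumption, mirroring rewrite() and _load() above:

    from mercurial.store import encodedir, decodedir

    def dump_fncache(paths):
        # one directory-escaped store path per line (cf. fncache.rewrite)
        return ''.join(encodedir(p) + '\n' for p in paths)

    def load_fncache(data):
        entries = set()
        for n, line in enumerate(data.splitlines(True)):
            if len(line) < 2 or line[-1] != '\n':
                raise ValueError('invalid entry in fncache, line %d' % (n + 1))
            entries.add(decodedir(line[:-1]))
        return entries

    text = dump_fncache(['data/foo.i/bar.i'])
    assert text == 'data/foo.i.hg/bar.i\n'
    assert load_fncache(text) == set(['data/foo.i/bar.i'])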
 class fncachestore(basicstore):
-    def __init__(self, path, opener, pathjoiner):
+    def __init__(self, path, opener, pathjoiner, encode):
+        self.encode = encode
         self.pathjoiner = pathjoiner
         self.path = self.pathjoiner(path, 'store')
         self.createmode = _calcmode(self.path)
         op = opener(self.path)
         op.createmode = self.createmode
         fnc = fncache(op)
         self.fncache = fnc

         def fncacheopener(path, mode='r', *args, **kw):
             if mode not in ('r', 'rb') and path.startswith('data/'):
                 fnc.add(path)
-            return op(hybridencode(path), mode, *args, **kw)
+            return op(self.encode(path), mode, *args, **kw)
         self.opener = fncacheopener

     def join(self, f):
-        return self.pathjoiner(self.path, hybridencode(f))
+        return self.pathjoiner(self.path, self.encode(f))

     def datafiles(self):
         rewrite = False
         existing = []
         pjoin = self.pathjoiner
         spath = self.path
         for f in self.fncache:
-            ef = hybridencode(f)
+            ef = self.encode(f)
             try:
                 st = os.stat(pjoin(spath, ef))
                 yield f, ef, st.st_size
                 existing.append(f)
             except OSError:
                 # nonexistent entry
                 rewrite = True
         if rewrite:
             # rewrite fncache to remove nonexistent entries
             # (may be caused by rollback / strip)
             self.fncache.rewrite(existing)

     def copylist(self):
         d = _data + ' dh fncache'
         return (['requires', '00changelog.i'] +
                 [self.pathjoiner('store', f) for f in d.split()])

 def store(requirements, path, opener, pathjoiner=None):
     pathjoiner = pathjoiner or os.path.join
     if 'store' in requirements:
         if 'fncache' in requirements:
-            return fncachestore(path, opener, pathjoiner)
+            auxencode = lambda f: _auxencode(f, 'dotencode' in requirements)
+            encode = lambda f: _hybridencode(f, auxencode)
+            return fncachestore(path, opener, pathjoiner, encode)
         return encodedstore(path, opener, pathjoiner)
     return basicstore(path, opener, pathjoiner)
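After this change the factory therefore maps requirements to store flavours as: no 'store' -> basicstore (plain paths); 'store' -> encodedstore (reversible escaping); 'store' plus 'fncache' -> fncachestore with hybrid encoding; and additionally 'dotencode' -> the same store with leading periods and spaces escaped too. A toy decision table mirroring the branches (labels only, not Mercurial API):

    def pick_store(requirements):
        if 'store' not in requirements:
            return 'basicstore'
        if 'fncache' not in requirements:
            return 'encodedstore'
        if 'dotencode' in requirements:
            return 'fncachestore (hybrid encoding, dotencode=True)'
        return 'fncachestore (hybrid encoding, dotencode=False)'

    assert pick_store(set()) == 'basicstore'
    assert pick_store(set(['revlogv1', 'store'])) == 'encodedstore'
    assert pick_store(set(['revlogv1', 'store', 'fncache',
                           'dotencode'])).endswith('dotencode=True)')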
@@ -1,25 +1,29 @@
 #!/usr/bin/env python

 from mercurial import store

-enc = store.hybridencode # used for fncache repo format
+auxencode = lambda f: store._auxencode(f, True)
+hybridencode = lambda f: store._hybridencode(f, auxencode)
+
+enc = hybridencode # used for 'dotencode' repo format

 def show(s):
     print "A = '%s'" % s
     print "B = '%s'" % enc(s)
     print

 show('data/aux.bla/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c.i')

 show('data/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/'
      'TENTH/ELEVENTH/LOREMIPSUM.TXT.i')
 show('data/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/'
      'wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules'
      '.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider.i')
 show('data/AUX.THE-QUICK-BROWN-FOX-JU:MPS-OVER-THE-LAZY-DOG-THE-QUICK-'
      'BROWN-FOX-JUMPS-OVER-THE-LAZY-DOG.TXT.i')
 show('data/Project Planning/Resources/AnotherLongDirectoryName/'
      'Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt')
 show('data/Project.Planning/Resources/AnotherLongDirectoryName/'
      'Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt')
-show('data/foo.../foo / /a./_. /__/.x../ bla/something.i')
+show('data/foo.../foo / /a./_. /__/.x../ bla/.FOO/something.i')
+
@@ -1,21 +1,21 @@
 A = 'data/aux.bla/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c.i'
 B = 'data/au~78.bla/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i'

 A = 'data/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT.i'
 B = 'dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i'

 A = 'data/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider.i'
 B = 'dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i'

 A = 'data/AUX.THE-QUICK-BROWN-FOX-JU:MPS-OVER-THE-LAZY-DOG-THE-QUICK-BROWN-FOX-JUMPS-OVER-THE-LAZY-DOG.TXT.i'
 B = 'dh/au~78.the-quick-brown-fox-ju~3amps-over-the-lazy-dog-the-quick-brown-fox-jud4dcadd033000ab2b26eb66bae1906bcb15d4a70.i'

 A = 'data/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt'
 B = 'dh/project_/resource/anotherl/followed/andanoth/andthenanextremelylongfilenaf93030515d9849cfdca52937c2204d19f83913e5.txt'

 A = 'data/Project.Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt'
 B = 'dh/project_/resource/anotherl/followed/andanoth/andthenanextremelylongfilena0fd7c506f5c9d58204444fc67e9499006bd2d445.txt'

-A = 'data/foo.../foo / /a./_. /__/.x../ bla/something.i'
-B = 'data/foo..~2e/foo ~20/~20/a~2e/__.~20/____/.x.~2e/ bla/something.i'
+A = 'data/foo.../foo / /a./_. /__/.x../ bla/.FOO/something.i'
+B = 'data/foo..~2e/foo ~20/~20/a~2e/__.~20/____/~2ex.~2e/~20 bla/~2e_f_o_o/something.i'

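The last pair exercises the new behaviour end to end: '.x..' gains a leading '~2e', a leading space becomes '~20', and '.FOO' becomes '~2e_f_o_o'. These hybrid escapes are one-way; the store never decodes them, because the fncache file keeps the original names. Reusing the wrappers from the test script above:

    from mercurial import store

    auxencode = lambda f: store._auxencode(f, True)
    enc = lambda f: store._hybridencode(f, auxencode)

    src = 'data/.FOO/something.i'
    assert enc(src) == 'data/~2e_f_o_o/something.i'
    # the store writes revlog data under enc(src); the original name 'src'
    # is recorded verbatim in .hg/store/fncache, so nothing ever decodes it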
@@ -1,187 +1,191 @@
 This test tries to exercise the ssh functionality with a dummy script

 $ cat <<EOF > dummyssh
 > import sys
 > import os
 >
 > os.chdir(os.path.dirname(sys.argv[0]))
 > if sys.argv[1] != "user@dummy":
 >     sys.exit(-1)
 >
 > if not os.path.exists("dummyssh"):
 >     sys.exit(-1)
 >
 > log = open("dummylog", "ab")
 > log.write("Got arguments")
 > for i, arg in enumerate(sys.argv[1:]):
 >     log.write(" %d:%s" % (i+1, arg))
 > log.write("\n")
 > log.close()
 > r = os.system(sys.argv[2])
 > sys.exit(bool(r))
 > EOF

 $ checknewrepo()
 > {
 > name=$1
 > if [ -d "$name"/.hg/store ]; then
 > echo store created
 > fi
 > if [ -f "$name"/.hg/00changelog.i ]; then
 > echo 00changelog.i created
 > fi
 > cat "$name"/.hg/requires
 > }

 creating 'local'

 $ hg init local
 $ checknewrepo local
 store created
 00changelog.i created
 revlogv1
 store
 fncache
+dotencode
 $ echo this > local/foo
 $ hg ci --cwd local -A -m "init"
 adding foo

 creating repo with format.usestore=false

 $ hg --config format.usestore=false init old
 $ checknewrepo old
 revlogv1

 creating repo with format.usefncache=false

 $ hg --config format.usefncache=false init old2
 $ checknewrepo old2
 store created
 00changelog.i created
 revlogv1
 store

 test failure

 $ hg init local
 abort: repository local already exists!
 [255]

 init+push to remote2

 $ hg init -e "python ./dummyssh" ssh://user@dummy/remote2
 $ hg incoming -R remote2 local
 comparing with local
 changeset:   0:08b9e9f63b32
 tag:         tip
 user:        test
 date:        Thu Jan 01 00:00:00 1970 +0000
 summary:     init


 $ hg push -R local -e "python ./dummyssh" ssh://user@dummy/remote2
 pushing to ssh://user@dummy/remote2
 searching for changes
 remote: adding changesets
 remote: adding manifests
 remote: adding file changes
 remote: added 1 changesets with 1 changes to 1 files

 clone to remote1

 $ hg clone -e "python ./dummyssh" local ssh://user@dummy/remote1
 searching for changes
 remote: adding changesets
 remote: adding manifests
 remote: adding file changes
 remote: added 1 changesets with 1 changes to 1 files

 init to existing repo

 $ hg init -e "python ./dummyssh" ssh://user@dummy/remote1
 abort: repository remote1 already exists!
 abort: could not create remote repo!
 [255]

 clone to existing repo

 $ hg clone -e "python ./dummyssh" local ssh://user@dummy/remote1
 abort: repository remote1 already exists!
 abort: could not create remote repo!
 [255]

 output of dummyssh

 $ cat dummylog
 Got arguments 1:user@dummy 2:hg init remote2
 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio
 Got arguments 1:user@dummy 2:hg -R remote2 serve --stdio
 Got arguments 1:user@dummy 2:hg init remote1
 Got arguments 1:user@dummy 2:hg -R remote1 serve --stdio
 Got arguments 1:user@dummy 2:hg init remote1
 Got arguments 1:user@dummy 2:hg init remote1

 comparing repositories

 $ hg tip -q -R local
 0:08b9e9f63b32
 $ hg tip -q -R remote1
 0:08b9e9f63b32
 $ hg tip -q -R remote2
 0:08b9e9f63b32

 check names for repositories (clashes with URL schemes, special chars)

 $ for i in bundle file hg http https old-http ssh static-http " " "with space"; do
 > printf "hg init \"$i\"... "
 > hg init "$i"
 > test -d "$i" -a -d "$i/.hg" && echo "ok" || echo "failed"
 > done
 hg init "bundle"... ok
 hg init "file"... ok
 hg init "hg"... ok
 hg init "http"... ok
 hg init "https"... ok
 hg init "old-http"... ok
 hg init "ssh"... ok
 hg init "static-http"... ok
 hg init " "... ok
 hg init "with space"... ok

 creating 'local/sub/repo'

 $ hg init local/sub/repo
 $ checknewrepo local/sub/repo
 store created
 00changelog.i created
 revlogv1
 store
 fncache
+dotencode

 prepare test of init of url configured from paths

 $ echo '[paths]' >> $HGRCPATH
 $ echo "somewhere = `pwd`/url from paths" >> $HGRCPATH
 $ echo "elsewhere = `pwd`/another paths url" >> $HGRCPATH

 init should (for consistency with clone) expand the url

 $ hg init somewhere
 $ checknewrepo "url from paths"
 store created
 00changelog.i created
 revlogv1
 store
 fncache
+dotencode

 verify that clone also expand urls

 $ hg clone somewhere elsewhere
 updating to branch default
 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
 $ checknewrepo "another paths url"
 store created
 00changelog.i created
 revlogv1
 store
 fncache
+dotencode
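The 'dotencode' entry now written to .hg/requires is Mercurial's forward-compatibility gate: a client that does not recognize a requirement refuses to touch the repository instead of misreading its store, which is why older clients cleanly reject dotencode repositories. A minimal sketch of that gate, assuming one token per line in the requires file:

    def checkrequirements(requiresdata, supported):
        # refuse to operate on a repo whose requirements we do not understand
        missing = set(requiresdata.split()) - supported
        if missing:
            raise RuntimeError('requirements not supported: %s'
                               % ', '.join(sorted(missing)))

    new_client = set(['revlogv1', 'store', 'fncache', 'shared', 'dotencode'])
    old_client = set(['revlogv1', 'store', 'fncache', 'shared'])
    requires = 'revlogv1\nstore\nfncache\ndotencode\n'

    checkrequirements(requires, new_client)      # fine
    try:
        checkrequirements(requires, old_client)  # old hg aborts cleanly
    except RuntimeError:
        pass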