merge with main
Benoit Boissinot
r10321:6e721636 merge default
@@ -1,2169 +1,2169 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
import tags as tags_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    supported = set('revlogv1 store fncache shared'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
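        # Note: callers normally reach this constructor through the
        # hg.repository() factory rather than instantiating it directly;
        # a minimal sketch (path hypothetical):
        #   from mercurial import ui, hg
        #   repo = hg.repository(ui.ui(), '/path/to/repo')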

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.defversion = c.version
        return c
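        # HG_PENDING is exported while pretxn* hooks run (see the 'pending'
        # callback in commitctx below), so a hook process that re-opens this
        # repository can read the not-yet-finalized revisions from the
        # 00changelog.i.a pending file.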

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        m = match_.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
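        # Usage sketch (tag name and message hypothetical):
        #   repo.tag('v1.0', repo.lookup('tip'), 'Added tag v1.0',
        #            local=False, user=None, date=None)
        # commits a new .hgtags revision via _tag() above.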

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tags_.findglobaltags(self.ui, self, alltags, tagtypes)
        tags_.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev + 1, tiprev + 1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchmap(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
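    # branchheads.cache layout, as read above and written below: the first
    # line is "<tip-hex> <tip-rev>" and is used to detect staleness; each
    # remaining line is "<head-hex> <branch name>", one entry per head.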

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        # collect new branch entries
        newbranches = {}
        for r in xrange(start, end):
            c = self[r]
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) < 2:
                continue
            newbheads = []
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                bheads = [b for b in bheads if b not in reachable]
                newbheads.insert(0, latest)
            bheads.extend(newbheads)
            partial[branch] = bheads
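        # Why plain parent checks are insufficient (the example above): in
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a), node 1 is not a
        # parent of 3, yet it stops being a head of branch a once 3 exists;
        # hence the reachability walk from each new candidate head.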

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
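        # Resolution order, as implemented above: integer revision, '.',
        # 'null', 'tip', an exact changelog match, tag names, branch names,
        # then an unambiguous node-prefix match; anything else raises
        # RepoLookupError.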

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = match_.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
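        # 'filter' names an hgrc section ('encode' or 'decode'); a
        # hypothetical configuration mapping a pattern to a shell pipe
        # command, as consumed above:
        #   [encode]
        #   *.txt = unix2dos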

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
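        # aftertrans (a helper defined at module level later in this file)
        # performs the renames above when the transaction closes, turning
        # the journal.* files into the undo.* files that rollback() reads.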

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
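    # Lock ordering: callers that need both locks must acquire wlock()
    # before lock(), as commit() and rollback() do, to avoid deadlocks.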

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
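        # In commitctx below, the node returned here becomes fname's entry
        # in the new manifest; fname is added to changelist only when the
        # contents, parents, copy metadata, or flags actually changed.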

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            try:
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()
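    # Usage sketch (message and user hypothetical):
    #   node = repo.commit(text='fix parser', user='alice')
    # returns the new changeset node, or None when there is nothing to
    # commit on the current branch.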

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()
939
939
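    # A minimal sketch of the commit pipeline above (illustrative comment
    # only, not part of the original module; `repo` is an assumed
    # localrepository instance):
    #
    #   repo.hook('precommit', ...)     # may veto before any work is done
    #   # ... files checked in via _filecommit, then manifest, changelog ...
    #   repo.hook('pretxncommit', ...)  # may veto; pending data is visible
    #   tr.close()                      # the transaction commits atomically
    #   repo.hook('commit', ...)        # informational, fired after the fact
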
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        tags_.findglobaltags(self.ui, self, {}, {})

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

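    # Usage sketch (illustrative): status() always returns seven sorted
    # lists in this fixed order, so callers typically unpack all of them:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(clean=True)     # `repo` is an assumed instance
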
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    # a bare except here would also swallow KeyboardInterrupt
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    # report the relative name, as the other branches do
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

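    # Illustrative call (assuming a repo instance): add() returns the names
    # it refused to track, so the caller can report them:
    #
    #   rejected = repo.add(['README', 'src/main.c'])
    #   if rejected:
    #       pass  # the dirstate was left untouched for these files
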
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

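    # The negated-rev sort above puts the newest head first; e.g. heads at
    # revs 102 and 100 become [(-102, nA), (-100, nB)], which sorts so that
    # nA (the higher rev) comes back before nB.
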
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

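    # Worked example: for a pair (top, bottom) on a linear history with top
    # at rev 100 and bottom at rev 0, the doubling step size (f = 1, 2, 4,
    # ...) samples revs 99, 98, 96, 92, 84, 68, 36, i.e. about
    # log2(distance) nodes per pair. findcommonincoming() later bisects
    # between adjacent samples via remote.between() to locate the first
    # unknown changeset.
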
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug("request %d: %s\n" %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p + 10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads

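    # Discovery sketch: the negotiation above needs only three remote
    # calls, heads(), branches() and between():
    #   1. fetch the remote heads; any head already known locally goes
    #      straight into base
    #   2. walk the unknown heads as linear branch segments (requested in
    #      batches of 10) until each segment bottoms out at a known node
    #   3. binary-search the partially known segments with between() until
    #      the earliest unknown changeset is found and added to fetch
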
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

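    # Illustrative call: with a heads list, findoutgoing() also reports
    # which remote heads will gain children from the push:
    #
    #   roots, updated = repo.findoutgoing(other, heads=remote_heads)
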
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

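    # Usage sketch (illustrative): a pull is discovery plus one changegroup
    # transfer, all under the repository lock:
    #
    #   repo.pull(other)             # everything missing from `other`
    #   repo.pull(other, heads=[h])  # ancestors of h only; requires the
    #                                # remote 'changegroupsubset' capability
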
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.
        '''
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

        def checkbranch(lheads, rheads, updatelb):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelb: outgoing local branch bases
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                # add local heads involved in the push
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelb]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # add heads we don't have or that are not involved in the push
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[lheads[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        if not checkbranch(lheads, rheads, update):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            nodes = self.changelog.findmissing(common.keys())
            cg = self._changegroup(nodes, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

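    # Outcome summary for prepush(): (None, 1) means nothing to push,
    # (None, 0) means the push was vetoed (it would create new remote
    # heads or a new remote branch without --force), and (changegroup,
    # remoteheads) means a bundle is ready for the transports below.
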
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
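        # Illustrative shape of extranodes (hypothetical values):
        #
        #   extranodes = {
        #       'foo/bar.c': [(filenode, linknode)],
        #       1: [(manifestnode, linknode)],  # key 1 means the manifest
        #   }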
1649
1649
1650 # Set up some initial variables
1650 # Set up some initial variables
1651 # Make it easy to refer to self.changelog
1651 # Make it easy to refer to self.changelog
1652 cl = self.changelog
1652 cl = self.changelog
1653 # msng is short for missing - compute the list of changesets in this
1653 # msng is short for missing - compute the list of changesets in this
1654 # changegroup.
1654 # changegroup.
1655 if not bases:
1655 if not bases:
1656 bases = [nullid]
1656 bases = [nullid]
1657 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1657 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1658
1658
1659 if extranodes is None:
1659 if extranodes is None:
1660 # can we go through the fast path ?
1660 # can we go through the fast path ?
1661 heads.sort()
1661 heads.sort()
1662 allheads = self.heads()
1662 allheads = self.heads()
1663 allheads.sort()
1663 allheads.sort()
1664 if heads == allheads:
1664 if heads == allheads:
1665 return self._changegroup(msng_cl_lst, source)
1665 return self._changegroup(msng_cl_lst, source)
1666
1666
1667 # slow path
1667 # slow path
1668 self.hook('preoutgoing', throw=True, source=source)
1668 self.hook('preoutgoing', throw=True, source=source)
1669
1669
1670 self.changegroupinfo(msng_cl_lst, source)
1670 self.changegroupinfo(msng_cl_lst, source)
1671 # Some bases may turn out to be superfluous, and some heads may be
1671 # Some bases may turn out to be superfluous, and some heads may be
1672 # too. nodesbetween will return the minimal set of bases and heads
1672 # too. nodesbetween will return the minimal set of bases and heads
1673 # necessary to re-create the changegroup.
1673 # necessary to re-create the changegroup.
1674
1674
1675 # Known heads are the list of heads that it is assumed the recipient
1675 # Known heads are the list of heads that it is assumed the recipient
1676 # of this changegroup will know about.
1676 # of this changegroup will know about.
1677 knownheads = set()
1677 knownheads = set()
1678 # We assume that all parents of bases are known heads.
1678 # We assume that all parents of bases are known heads.
1679 for n in bases:
1679 for n in bases:
1680 knownheads.update(cl.parents(n))
1680 knownheads.update(cl.parents(n))
1681 knownheads.discard(nullid)
1681 knownheads.discard(nullid)
1682 knownheads = list(knownheads)
1682 knownheads = list(knownheads)
1683 if knownheads:
1683 if knownheads:
1684 # Now that we know what heads are known, we can compute which
1684 # Now that we know what heads are known, we can compute which
1685 # changesets are known. The recipient must know about all
1685 # changesets are known. The recipient must know about all
1686 # changesets required to reach the known heads from the null
1686 # changesets required to reach the known heads from the null
1687 # changeset.
1687 # changeset.
1688 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1688 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1689 junk = None
1689 junk = None
1690 # Transform the list into a set.
1690 # Transform the list into a set.
1691 has_cl_set = set(has_cl_set)
1691 has_cl_set = set(has_cl_set)
1692 else:
1692 else:
1693 # If there were no known heads, the recipient cannot be assumed to
1693 # If there were no known heads, the recipient cannot be assumed to
1694 # know about any changesets.
1694 # know about any changesets.
1695 has_cl_set = set()
1695 has_cl_set = set()
1696
1696
1697 # Make it easy to refer to self.manifest
1697 # Make it easy to refer to self.manifest
1698 mnfst = self.manifest
1698 mnfst = self.manifest
1699 # We don't know which manifests are missing yet
1699 # We don't know which manifests are missing yet
1700 msng_mnfst_set = {}
1700 msng_mnfst_set = {}
1701 # Nor do we know which filenodes are missing.
1701 # Nor do we know which filenodes are missing.
1702 msng_filenode_set = {}
1702 msng_filenode_set = {}
1703
1703
1704 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1704 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1705 junk = None
1705 junk = None
1706
1706
1707 # A changeset always belongs to itself, so the changenode lookup
1707 # A changeset always belongs to itself, so the changenode lookup
1708 # function for a changenode is identity.
1708 # function for a changenode is identity.
1709 def identity(x):
1709 def identity(x):
1710 return x
1710 return x
1711
1711
1712 # If we determine that a particular file or manifest node must be a
1712 # If we determine that a particular file or manifest node must be a
1713 # node that the recipient of the changegroup will already have, we can
1713 # node that the recipient of the changegroup will already have, we can
1714 # also assume the recipient will have all the parents. This function
1714 # also assume the recipient will have all the parents. This function
1715 # prunes them from the set of missing nodes.
1715 # prunes them from the set of missing nodes.
1716 def prune_parents(revlog, hasset, msngset):
1716 def prune_parents(revlog, hasset, msngset):
1717 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1717 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1718 msngset.pop(revlog.node(r), None)
1718 msngset.pop(revlog.node(r), None)
1719
1719
1720 # This is a function generating function used to set up an environment
1720 # This is a function generating function used to set up an environment
1721 # for the inner function to execute in.
1721 # for the inner function to execute in.
1722 def manifest_and_file_collector(changedfileset):
1722 def manifest_and_file_collector(changedfileset):
1723 # This is an information gathering function that gathers
1723 # This is an information gathering function that gathers
1724 # information from each changeset node that goes out as part of
1724 # information from each changeset node that goes out as part of
1725 # the changegroup. The information gathered is a list of which
1725 # the changegroup. The information gathered is a list of which
1726 # manifest nodes are potentially required (the recipient may
1726 # manifest nodes are potentially required (the recipient may
1727 # already have them) and total list of all files which were
1727 # already have them) and total list of all files which were
1728 # changed in any changeset in the changegroup.
1728 # changed in any changeset in the changegroup.
1729 #
1729 #
1730 # We also remember the first changenode we saw any manifest
1730 # We also remember the first changenode we saw any manifest
1731 # referenced by so we can later determine which changenode 'owns'
1731 # referenced by so we can later determine which changenode 'owns'
1732 # the manifest.
1732 # the manifest.
1733 def collect_manifests_and_files(clnode):
1733 def collect_manifests_and_files(clnode):
1734 c = cl.read(clnode)
1734 c = cl.read(clnode)
1735 for f in c[3]:
1735 for f in c[3]:
1736 # This is to make sure we only have one instance of each
1736 # This is to make sure we only have one instance of each
1737 # filename string for each filename.
1737 # filename string for each filename.
1738 changedfileset.setdefault(f, f)
1738 changedfileset.setdefault(f, f)
1739 msng_mnfst_set.setdefault(c[0], clnode)
1739 msng_mnfst_set.setdefault(c[0], clnode)
1740 return collect_manifests_and_files
1740 return collect_manifests_and_files
1741
1741
1742 # Figure out which manifest nodes (of the ones we think might be part
1742 # Figure out which manifest nodes (of the ones we think might be part
1743 # of the changegroup) the recipient must know about and remove them
1743 # of the changegroup) the recipient must know about and remove them
1744 # from the changegroup.
1744 # from the changegroup.
1745 def prune_manifests():
1745 def prune_manifests():
1746 has_mnfst_set = set()
1746 has_mnfst_set = set()
1747 for n in msng_mnfst_set:
1747 for n in msng_mnfst_set:
1748 # If a 'missing' manifest thinks it belongs to a changenode
1748 # If a 'missing' manifest thinks it belongs to a changenode
1749 # the recipient is assumed to have, obviously the recipient
1749 # the recipient is assumed to have, obviously the recipient
1750 # must have that manifest.
1750 # must have that manifest.
1751 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1751 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1752 if linknode in has_cl_set:
1752 if linknode in has_cl_set:
1753 has_mnfst_set.add(n)
1753 has_mnfst_set.add(n)
1754 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1754 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1755
1755
1756 # Use the information collected in collect_manifests_and_files to say
1756 # Use the information collected in collect_manifests_and_files to say
1757 # which changenode any manifestnode belongs to.
1757 # which changenode any manifestnode belongs to.
1758 def lookup_manifest_link(mnfstnode):
1758 def lookup_manifest_link(mnfstnode):
1759 return msng_mnfst_set[mnfstnode]
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r - 1 in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes
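
        # Note on the branch above (explanatory comment, not original to
        # this code): it is an optimization. mnfst.readdelta() parses only
        # the manifest entries that changed relative to the previous
        # revision, while mnfst.read() parses an entry for every tracked
        # file, so the delta path does far less work on large repositories.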

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generator that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link
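
        # Illustrative sketch of the closure-factory pattern above (the
        # names in the example are hypothetical, not from this module):
        # binding msngset once per file avoids re-indexing
        # msng_filenode_set for every node the revlog emits.
        #
        #   lookup = lookup_filenode_link_func('README')
        #   clnode = lookup(somefilenode)  # -> owning changelog node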

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
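
    # Minimal consumer sketch (hypothetical caller and transport, not part
    # of this module): the chunkbuffer returned above is file-like, so the
    # changegroup is streamed with plain read() calls.
    #
    #   cg = repo.changegroupsubset(bases, heads, 'push')
    #   while True:
    #       data = cg.read(4096)
    #       if not data:
    #           break
    #       send_to_wire(data)  # hypothetical transport function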

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while True:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
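
    # Sketch of decoding the return value per the docstring above (the
    # caller shown is hypothetical): the sign carries the direction of the
    # head-count change and the magnitude is offset by one so that "changes
    # were added" is never reported as 0.
    #
    #   ret = repo.addchangegroup(source, 'pull', url)
    #   if ret == 0:
    #       pass                     # nothing was added
    #   elif ret > 1:
    #       added = ret - 1          # this many new heads
    #   elif ret < 0:
    #       removed = -ret - 1       # this many heads went away
    #   else:                        # ret == 1
    #       pass                     # changesets added, head count unchanged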


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
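
    # Wire format consumed by stream_in above, summarized for reference:
    #
    #   <response code>\n                0 ok, 1 forbidden, 2 lock failed
    #   <total_files> <total_bytes>\n
    #   then, repeated total_files times:
    #   <store path>\0<size>\n           followed by exactly <size> raw bytes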

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
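
# Illustrative usage of aftertrans (hypothetical file names; assumes the
# transaction machinery invokes the returned closure when it finishes, as
# the comment above implies):
#
#   onclose = aftertrans([('journal', 'undo')])
#   onclose()  # renames journal -> undo without holding a repo reference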

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
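
# Hypothetical dispatch sketch (not part of this module): instance() and
# islocal() are the minimal hooks a repository backend exposes, letting a
# caller open a repo without knowing the concrete class:
#
#   if islocal(path):
#       repo = instance(ui, path, False)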