localrepo/branchcache: kill unused localrepo.branchcache...
Benoit Boissinot
r9674:603b23c6 default
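In short: the repository previously kept two copies of the branch head cache, self.branchcache (branch names re-encoded to the local charset) and self._ubranchcache (the raw UTF-8 mapping). Since callers only ever consume the locale-encoded dict through lbranchmap(), it no longer needs to live on the repo object; this changeset builds it on demand in lbranchmap() and renames _ubranchcache to _branchcache. A minimal usage sketch against this Mercurial 1.4-era API (the repository path and the driver code are illustrative, not part of the changeset):

# Sketch only, assuming a Mercurial 1.4-era install on sys.path.
from mercurial import ui as uimod, hg
from mercurial.node import hex

repo = hg.repository(uimod.ui(), '.')   # '.' is an illustrative path

# branchmap(): UTF-8 branch name -> list of all head nodes on the branch
for branch, heads in repo.branchmap().iteritems():
    print branch, [hex(h)[:12] for h in heads]

# lbranchmap(): the same mapping with names in the local encoding
# (what the removed repo.branchcache attribute used to hold)
local = repo.lbranchmap()

# branchtags(): branch name -> tipmost open head only
tips = repo.branchtags()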
@@ -1,2172 +1,2170 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 import tags as tags_
16 import tags as tags_
17 from lock import release
17 from lock import release
18 import weakref, stat, errno, os, time, inspect
18 import weakref, stat, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 supported = set('revlogv1 store fncache shared'.split())
23 supported = set('revlogv1 store fncache shared'.split())
24
24
25 def __init__(self, baseui, path=None, create=0):
25 def __init__(self, baseui, path=None, create=0):
26 repo.repository.__init__(self)
26 repo.repository.__init__(self)
27 self.root = os.path.realpath(path)
27 self.root = os.path.realpath(path)
28 self.path = os.path.join(self.root, ".hg")
28 self.path = os.path.join(self.root, ".hg")
29 self.origroot = path
29 self.origroot = path
30 self.opener = util.opener(self.path)
30 self.opener = util.opener(self.path)
31 self.wopener = util.opener(self.root)
31 self.wopener = util.opener(self.root)
32 self.baseui = baseui
32 self.baseui = baseui
33 self.ui = baseui.copy()
33 self.ui = baseui.copy()
34
34
35 try:
35 try:
36 self.ui.readconfig(self.join("hgrc"), self.root)
36 self.ui.readconfig(self.join("hgrc"), self.root)
37 extensions.loadall(self.ui)
37 extensions.loadall(self.ui)
38 except IOError:
38 except IOError:
39 pass
39 pass
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 requirements = ["revlogv1"]
46 requirements = ["revlogv1"]
47 if self.ui.configbool('format', 'usestore', True):
47 if self.ui.configbool('format', 'usestore', True):
48 os.mkdir(os.path.join(self.path, "store"))
48 os.mkdir(os.path.join(self.path, "store"))
49 requirements.append("store")
49 requirements.append("store")
50 if self.ui.configbool('format', 'usefncache', True):
50 if self.ui.configbool('format', 'usefncache', True):
51 requirements.append("fncache")
51 requirements.append("fncache")
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 reqfile = self.opener("requires", "w")
57 reqfile = self.opener("requires", "w")
58 for r in requirements:
58 for r in requirements:
59 reqfile.write("%s\n" % r)
59 reqfile.write("%s\n" % r)
60 reqfile.close()
60 reqfile.close()
61 else:
61 else:
62 raise error.RepoError(_("repository %s not found") % path)
62 raise error.RepoError(_("repository %s not found") % path)
63 elif create:
63 elif create:
64 raise error.RepoError(_("repository %s already exists") % path)
64 raise error.RepoError(_("repository %s already exists") % path)
65 else:
65 else:
66 # find requirements
66 # find requirements
67 requirements = set()
67 requirements = set()
68 try:
68 try:
69 requirements = set(self.opener("requires").read().splitlines())
69 requirements = set(self.opener("requires").read().splitlines())
70 except IOError, inst:
70 except IOError, inst:
71 if inst.errno != errno.ENOENT:
71 if inst.errno != errno.ENOENT:
72 raise
72 raise
73 for r in requirements - self.supported:
73 for r in requirements - self.supported:
74 raise error.RepoError(_("requirement '%s' not supported") % r)
74 raise error.RepoError(_("requirement '%s' not supported") % r)
75
75
76 self.sharedpath = self.path
76 self.sharedpath = self.path
77 try:
77 try:
78 s = os.path.realpath(self.opener("sharedpath").read())
78 s = os.path.realpath(self.opener("sharedpath").read())
79 if not os.path.exists(s):
79 if not os.path.exists(s):
80 raise error.RepoError(
80 raise error.RepoError(
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 self.sharedpath = s
82 self.sharedpath = s
83 except IOError, inst:
83 except IOError, inst:
84 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
85 raise
85 raise
86
86
87 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.spath = self.store.path
88 self.spath = self.store.path
89 self.sopener = self.store.opener
89 self.sopener = self.store.opener
90 self.sjoin = self.store.join
90 self.sjoin = self.store.join
91 self.opener.createmode = self.store.createmode
91 self.opener.createmode = self.store.createmode
92
92
93 # These two define the set of tags for this repository. _tags
93 # These two define the set of tags for this repository. _tags
94 # maps tag name to node; _tagtypes maps tag name to 'global' or
94 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 # 'local'. (Global tags are defined by .hgtags across all
95 # 'local'. (Global tags are defined by .hgtags across all
96 # heads, and local tags are defined in .hg/localtags.) They
96 # heads, and local tags are defined in .hg/localtags.) They
97 # constitute the in-memory cache of tags.
97 # constitute the in-memory cache of tags.
98 self._tags = None
98 self._tags = None
99 self._tagtypes = None
99 self._tagtypes = None
100
100
101 self.branchcache = None
101 self._branchcache = None # in UTF-8
102 self._ubranchcache = None # UTF-8 version of branchcache
103 self._branchcachetip = None
102 self._branchcachetip = None
104 self.nodetagscache = None
103 self.nodetagscache = None
105 self.filterpats = {}
104 self.filterpats = {}
106 self._datafilters = {}
105 self._datafilters = {}
107 self._transref = self._lockref = self._wlockref = None
106 self._transref = self._lockref = self._wlockref = None
108
107
109 @propertycache
108 @propertycache
110 def changelog(self):
109 def changelog(self):
111 c = changelog.changelog(self.sopener)
110 c = changelog.changelog(self.sopener)
112 if 'HG_PENDING' in os.environ:
111 if 'HG_PENDING' in os.environ:
113 p = os.environ['HG_PENDING']
112 p = os.environ['HG_PENDING']
114 if p.startswith(self.root):
113 if p.startswith(self.root):
115 c.readpending('00changelog.i.a')
114 c.readpending('00changelog.i.a')
116 self.sopener.defversion = c.version
115 self.sopener.defversion = c.version
117 return c
116 return c
118
117
119 @propertycache
118 @propertycache
120 def manifest(self):
119 def manifest(self):
121 return manifest.manifest(self.sopener)
120 return manifest.manifest(self.sopener)
122
121
123 @propertycache
122 @propertycache
124 def dirstate(self):
123 def dirstate(self):
125 return dirstate.dirstate(self.opener, self.ui, self.root)
124 return dirstate.dirstate(self.opener, self.ui, self.root)
126
125
127 def __getitem__(self, changeid):
126 def __getitem__(self, changeid):
128 if changeid is None:
127 if changeid is None:
129 return context.workingctx(self)
128 return context.workingctx(self)
130 return context.changectx(self, changeid)
129 return context.changectx(self, changeid)
131
130
132 def __nonzero__(self):
131 def __nonzero__(self):
133 return True
132 return True
134
133
135 def __len__(self):
134 def __len__(self):
136 return len(self.changelog)
135 return len(self.changelog)
137
136
138 def __iter__(self):
137 def __iter__(self):
139 for i in xrange(len(self)):
138 for i in xrange(len(self)):
140 yield i
139 yield i
141
140
142 def url(self):
141 def url(self):
143 return 'file:' + self.root
142 return 'file:' + self.root
144
143
145 def hook(self, name, throw=False, **args):
144 def hook(self, name, throw=False, **args):
146 return hook.hook(self.ui, self, name, throw, **args)
145 return hook.hook(self.ui, self, name, throw, **args)
147
146
148 tag_disallowed = ':\r\n'
147 tag_disallowed = ':\r\n'
149
148
150 def _tag(self, names, node, message, local, user, date, extra={}):
149 def _tag(self, names, node, message, local, user, date, extra={}):
151 if isinstance(names, str):
150 if isinstance(names, str):
152 allchars = names
151 allchars = names
153 names = (names,)
152 names = (names,)
154 else:
153 else:
155 allchars = ''.join(names)
154 allchars = ''.join(names)
156 for c in self.tag_disallowed:
155 for c in self.tag_disallowed:
157 if c in allchars:
156 if c in allchars:
158 raise util.Abort(_('%r cannot be used in a tag name') % c)
157 raise util.Abort(_('%r cannot be used in a tag name') % c)
159
158
160 for name in names:
159 for name in names:
161 self.hook('pretag', throw=True, node=hex(node), tag=name,
160 self.hook('pretag', throw=True, node=hex(node), tag=name,
162 local=local)
161 local=local)
163
162
164 def writetags(fp, names, munge, prevtags):
163 def writetags(fp, names, munge, prevtags):
165 fp.seek(0, 2)
164 fp.seek(0, 2)
166 if prevtags and prevtags[-1] != '\n':
165 if prevtags and prevtags[-1] != '\n':
167 fp.write('\n')
166 fp.write('\n')
168 for name in names:
167 for name in names:
169 m = munge and munge(name) or name
168 m = munge and munge(name) or name
170 if self._tagtypes and name in self._tagtypes:
169 if self._tagtypes and name in self._tagtypes:
171 old = self._tags.get(name, nullid)
170 old = self._tags.get(name, nullid)
172 fp.write('%s %s\n' % (hex(old), m))
171 fp.write('%s %s\n' % (hex(old), m))
173 fp.write('%s %s\n' % (hex(node), m))
172 fp.write('%s %s\n' % (hex(node), m))
174 fp.close()
173 fp.close()
175
174
176 prevtags = ''
175 prevtags = ''
177 if local:
176 if local:
178 try:
177 try:
179 fp = self.opener('localtags', 'r+')
178 fp = self.opener('localtags', 'r+')
180 except IOError:
179 except IOError:
181 fp = self.opener('localtags', 'a')
180 fp = self.opener('localtags', 'a')
182 else:
181 else:
183 prevtags = fp.read()
182 prevtags = fp.read()
184
183
185 # local tags are stored in the current charset
184 # local tags are stored in the current charset
186 writetags(fp, names, None, prevtags)
185 writetags(fp, names, None, prevtags)
187 for name in names:
186 for name in names:
188 self.hook('tag', node=hex(node), tag=name, local=local)
187 self.hook('tag', node=hex(node), tag=name, local=local)
189 return
188 return
190
189
191 try:
190 try:
192 fp = self.wfile('.hgtags', 'rb+')
191 fp = self.wfile('.hgtags', 'rb+')
193 except IOError:
192 except IOError:
194 fp = self.wfile('.hgtags', 'ab')
193 fp = self.wfile('.hgtags', 'ab')
195 else:
194 else:
196 prevtags = fp.read()
195 prevtags = fp.read()
197
196
198 # committed tags are stored in UTF-8
197 # committed tags are stored in UTF-8
199 writetags(fp, names, encoding.fromlocal, prevtags)
198 writetags(fp, names, encoding.fromlocal, prevtags)
200
199
201 if '.hgtags' not in self.dirstate:
200 if '.hgtags' not in self.dirstate:
202 self.add(['.hgtags'])
201 self.add(['.hgtags'])
203
202
204 m = match_.exact(self.root, '', ['.hgtags'])
203 m = match_.exact(self.root, '', ['.hgtags'])
205 tagnode = self.commit(message, user, date, extra=extra, match=m)
204 tagnode = self.commit(message, user, date, extra=extra, match=m)
206
205
207 for name in names:
206 for name in names:
208 self.hook('tag', node=hex(node), tag=name, local=local)
207 self.hook('tag', node=hex(node), tag=name, local=local)
209
208
210 return tagnode
209 return tagnode
211
210
212 def tag(self, names, node, message, local, user, date):
211 def tag(self, names, node, message, local, user, date):
213 '''tag a revision with one or more symbolic names.
212 '''tag a revision with one or more symbolic names.
214
213
215 names is a list of strings or, when adding a single tag, names may be a
214 names is a list of strings or, when adding a single tag, names may be a
216 string.
215 string.
217
216
218 if local is True, the tags are stored in a per-repository file.
217 if local is True, the tags are stored in a per-repository file.
219 otherwise, they are stored in the .hgtags file, and a new
218 otherwise, they are stored in the .hgtags file, and a new
220 changeset is committed with the change.
219 changeset is committed with the change.
221
220
222 keyword arguments:
221 keyword arguments:
223
222
224 local: whether to store tags in non-version-controlled file
223 local: whether to store tags in non-version-controlled file
225 (default False)
224 (default False)
226
225
227 message: commit message to use if committing
226 message: commit message to use if committing
228
227
229 user: name of user to use if committing
228 user: name of user to use if committing
230
229
231 date: date tuple to use if committing'''
230 date: date tuple to use if committing'''
232
231
233 for x in self.status()[:5]:
232 for x in self.status()[:5]:
234 if '.hgtags' in x:
233 if '.hgtags' in x:
235 raise util.Abort(_('working copy of .hgtags is changed '
234 raise util.Abort(_('working copy of .hgtags is changed '
236 '(please commit .hgtags manually)'))
235 '(please commit .hgtags manually)'))
237
236
238 self.tags() # instantiate the cache
237 self.tags() # instantiate the cache
239 self._tag(names, node, message, local, user, date)
238 self._tag(names, node, message, local, user, date)
240
239
241 def tags(self):
240 def tags(self):
242 '''return a mapping of tag to node'''
241 '''return a mapping of tag to node'''
243 if self._tags is None:
242 if self._tags is None:
244 (self._tags, self._tagtypes) = self._findtags()
243 (self._tags, self._tagtypes) = self._findtags()
245
244
246 return self._tags
245 return self._tags
247
246
248 def _findtags(self):
247 def _findtags(self):
249 '''Do the hard work of finding tags. Return a pair of dicts
248 '''Do the hard work of finding tags. Return a pair of dicts
250 (tags, tagtypes) where tags maps tag name to node, and tagtypes
249 (tags, tagtypes) where tags maps tag name to node, and tagtypes
251 maps tag name to a string like \'global\' or \'local\'.
250 maps tag name to a string like \'global\' or \'local\'.
252 Subclasses or extensions are free to add their own tags, but
251 Subclasses or extensions are free to add their own tags, but
253 should be aware that the returned dicts will be retained for the
252 should be aware that the returned dicts will be retained for the
254 duration of the localrepo object.'''
253 duration of the localrepo object.'''
255
254
256 # XXX what tagtype should subclasses/extensions use? Currently
255 # XXX what tagtype should subclasses/extensions use? Currently
257 # mq and bookmarks add tags, but do not set the tagtype at all.
256 # mq and bookmarks add tags, but do not set the tagtype at all.
258 # Should each extension invent its own tag type? Should there
257 # Should each extension invent its own tag type? Should there
259 # be one tagtype for all such "virtual" tags? Or is the status
258 # be one tagtype for all such "virtual" tags? Or is the status
260 # quo fine?
259 # quo fine?
261
260
262 alltags = {} # map tag name to (node, hist)
261 alltags = {} # map tag name to (node, hist)
263 tagtypes = {}
262 tagtypes = {}
264
263
265 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
264 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
266 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
265 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
267
266
268 # Build the return dicts. Have to re-encode tag names because
267 # Build the return dicts. Have to re-encode tag names because
269 # the tags module always uses UTF-8 (in order not to lose info
268 # the tags module always uses UTF-8 (in order not to lose info
270 # writing to the cache), but the rest of Mercurial wants them in
269 # writing to the cache), but the rest of Mercurial wants them in
271 # local encoding.
270 # local encoding.
272 tags = {}
271 tags = {}
273 for (name, (node, hist)) in alltags.iteritems():
272 for (name, (node, hist)) in alltags.iteritems():
274 if node != nullid:
273 if node != nullid:
275 tags[encoding.tolocal(name)] = node
274 tags[encoding.tolocal(name)] = node
276 tags['tip'] = self.changelog.tip()
275 tags['tip'] = self.changelog.tip()
277 tagtypes = dict([(encoding.tolocal(name), value)
276 tagtypes = dict([(encoding.tolocal(name), value)
278 for (name, value) in tagtypes.iteritems()])
277 for (name, value) in tagtypes.iteritems()])
279 return (tags, tagtypes)
278 return (tags, tagtypes)
280
279
281 def tagtype(self, tagname):
280 def tagtype(self, tagname):
282 '''
281 '''
283 return the type of the given tag. result can be:
282 return the type of the given tag. result can be:
284
283
285 'local' : a local tag
284 'local' : a local tag
286 'global' : a global tag
285 'global' : a global tag
287 None : tag does not exist
286 None : tag does not exist
288 '''
287 '''
289
288
290 self.tags()
289 self.tags()
291
290
292 return self._tagtypes.get(tagname)
291 return self._tagtypes.get(tagname)
293
292
294 def tagslist(self):
293 def tagslist(self):
295 '''return a list of tags ordered by revision'''
294 '''return a list of tags ordered by revision'''
296 l = []
295 l = []
297 for t, n in self.tags().iteritems():
296 for t, n in self.tags().iteritems():
298 try:
297 try:
299 r = self.changelog.rev(n)
298 r = self.changelog.rev(n)
300 except:
299 except:
301 r = -2 # sort to the beginning of the list if unknown
300 r = -2 # sort to the beginning of the list if unknown
302 l.append((r, t, n))
301 l.append((r, t, n))
303 return [(t, n) for r, t, n in sorted(l)]
302 return [(t, n) for r, t, n in sorted(l)]
304
303
305 def nodetags(self, node):
304 def nodetags(self, node):
306 '''return the tags associated with a node'''
305 '''return the tags associated with a node'''
307 if not self.nodetagscache:
306 if not self.nodetagscache:
308 self.nodetagscache = {}
307 self.nodetagscache = {}
309 for t, n in self.tags().iteritems():
308 for t, n in self.tags().iteritems():
310 self.nodetagscache.setdefault(n, []).append(t)
309 self.nodetagscache.setdefault(n, []).append(t)
311 return self.nodetagscache.get(node, [])
310 return self.nodetagscache.get(node, [])
312
311
313 def _branchtags(self, partial, lrev):
312 def _branchtags(self, partial, lrev):
314 # TODO: rename this function?
313 # TODO: rename this function?
315 tiprev = len(self) - 1
314 tiprev = len(self) - 1
316 if lrev != tiprev:
315 if lrev != tiprev:
317 self._updatebranchcache(partial, lrev+1, tiprev+1)
316 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
317 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319
318
320 return partial
319 return partial
321
320
322 def lbranchmap(self):
321 def lbranchmap(self):
323 self.branchcache = {}
322 branchcache = {}
324 partial = self.branchmap()
323 partial = self.branchmap()
325
324
326 # the branch cache is stored on disk as UTF-8, but in the local
325 # the branch cache is stored on disk as UTF-8, but in the local
327 # charset internally
326 # charset internally
328 for k, v in partial.iteritems():
327 for k, v in partial.iteritems():
329 self.branchcache[encoding.tolocal(k)] = v
328 branchcache[encoding.tolocal(k)] = v
330 return self.branchcache
329 return branchcache
331
330
332 def branchmap(self):
331 def branchmap(self):
333 tip = self.changelog.tip()
332 tip = self.changelog.tip()
334 if self._ubranchcache is not None and self._branchcachetip == tip:
333 if self._branchcache is not None and self._branchcachetip == tip:
335 return self._ubranchcache
334 return self._branchcache
336
335
337 oldtip = self._branchcachetip
336 oldtip = self._branchcachetip
338 self._branchcachetip = tip
337 self._branchcachetip = tip
339 if oldtip is None or oldtip not in self.changelog.nodemap:
338 if oldtip is None or oldtip not in self.changelog.nodemap:
340 partial, last, lrev = self._readbranchcache()
339 partial, last, lrev = self._readbranchcache()
341 else:
340 else:
342 lrev = self.changelog.rev(oldtip)
341 lrev = self.changelog.rev(oldtip)
343 partial = self._ubranchcache
342 partial = self._branchcache
344
343
345 self._branchtags(partial, lrev)
344 self._branchtags(partial, lrev)
346 # this private cache holds all heads (not just tips)
345 # this private cache holds all heads (not just tips)
347 self._ubranchcache = partial
346 self._branchcache = partial
348
347
349 return self._ubranchcache
348 return self._branchcache
350
349
351 def branchtags(self):
350 def branchtags(self):
352 '''return a dict where branch names map to the tipmost head of
351 '''return a dict where branch names map to the tipmost head of
353 the branch, open heads come before closed'''
352 the branch, open heads come before closed'''
354 bt = {}
353 bt = {}
355 for bn, heads in self.lbranchmap().iteritems():
354 for bn, heads in self.lbranchmap().iteritems():
356 head = None
355 head = None
357 for i in range(len(heads)-1, -1, -1):
356 for i in range(len(heads)-1, -1, -1):
358 h = heads[i]
357 h = heads[i]
359 if 'close' not in self.changelog.read(h)[5]:
358 if 'close' not in self.changelog.read(h)[5]:
360 head = h
359 head = h
361 break
360 break
362 # no open heads were found
361 # no open heads were found
363 if head is None:
362 if head is None:
364 head = heads[-1]
363 head = heads[-1]
365 bt[bn] = head
364 bt[bn] = head
366 return bt
365 return bt
367
366
368
367
369 def _readbranchcache(self):
368 def _readbranchcache(self):
370 partial = {}
369 partial = {}
371 try:
370 try:
372 f = self.opener("branchheads.cache")
371 f = self.opener("branchheads.cache")
373 lines = f.read().split('\n')
372 lines = f.read().split('\n')
374 f.close()
373 f.close()
375 except (IOError, OSError):
374 except (IOError, OSError):
376 return {}, nullid, nullrev
375 return {}, nullid, nullrev
377
376
378 try:
377 try:
379 last, lrev = lines.pop(0).split(" ", 1)
378 last, lrev = lines.pop(0).split(" ", 1)
380 last, lrev = bin(last), int(lrev)
379 last, lrev = bin(last), int(lrev)
381 if lrev >= len(self) or self[lrev].node() != last:
380 if lrev >= len(self) or self[lrev].node() != last:
382 # invalidate the cache
381 # invalidate the cache
383 raise ValueError('invalidating branch cache (tip differs)')
382 raise ValueError('invalidating branch cache (tip differs)')
384 for l in lines:
383 for l in lines:
385 if not l: continue
384 if not l: continue
386 node, label = l.split(" ", 1)
385 node, label = l.split(" ", 1)
387 partial.setdefault(label.strip(), []).append(bin(node))
386 partial.setdefault(label.strip(), []).append(bin(node))
388 except KeyboardInterrupt:
387 except KeyboardInterrupt:
389 raise
388 raise
390 except Exception, inst:
389 except Exception, inst:
391 if self.ui.debugflag:
390 if self.ui.debugflag:
392 self.ui.warn(str(inst), '\n')
391 self.ui.warn(str(inst), '\n')
393 partial, last, lrev = {}, nullid, nullrev
392 partial, last, lrev = {}, nullid, nullrev
394 return partial, last, lrev
393 return partial, last, lrev
395
394
396 def _writebranchcache(self, branches, tip, tiprev):
395 def _writebranchcache(self, branches, tip, tiprev):
397 try:
396 try:
398 f = self.opener("branchheads.cache", "w", atomictemp=True)
397 f = self.opener("branchheads.cache", "w", atomictemp=True)
399 f.write("%s %s\n" % (hex(tip), tiprev))
398 f.write("%s %s\n" % (hex(tip), tiprev))
400 for label, nodes in branches.iteritems():
399 for label, nodes in branches.iteritems():
401 for node in nodes:
400 for node in nodes:
402 f.write("%s %s\n" % (hex(node), label))
401 f.write("%s %s\n" % (hex(node), label))
403 f.rename()
402 f.rename()
404 except (IOError, OSError):
403 except (IOError, OSError):
405 pass
404 pass
406
405
407 def _updatebranchcache(self, partial, start, end):
406 def _updatebranchcache(self, partial, start, end):
408 # collect new branch entries
407 # collect new branch entries
409 newbranches = {}
408 newbranches = {}
410 for r in xrange(start, end):
409 for r in xrange(start, end):
411 c = self[r]
410 c = self[r]
412 newbranches.setdefault(c.branch(), []).append(c.node())
411 newbranches.setdefault(c.branch(), []).append(c.node())
413 # if older branchheads are reachable from new ones, they aren't
412 # if older branchheads are reachable from new ones, they aren't
414 # really branchheads. Note checking parents is insufficient:
413 # really branchheads. Note checking parents is insufficient:
415 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
414 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
416 for branch, newnodes in newbranches.iteritems():
415 for branch, newnodes in newbranches.iteritems():
417 bheads = partial.setdefault(branch, [])
416 bheads = partial.setdefault(branch, [])
418 bheads.extend(newnodes)
417 bheads.extend(newnodes)
419 if len(bheads) < 2:
418 if len(bheads) < 2:
420 continue
419 continue
421 newbheads = []
420 newbheads = []
422 # starting from tip means fewer passes over reachable
421 # starting from tip means fewer passes over reachable
423 while newnodes:
422 while newnodes:
424 latest = newnodes.pop()
423 latest = newnodes.pop()
425 if latest not in bheads:
424 if latest not in bheads:
426 continue
425 continue
427 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
426 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
428 reachable = self.changelog.reachable(latest, minbhrev)
427 reachable = self.changelog.reachable(latest, minbhrev)
429 bheads = [b for b in bheads if b not in reachable]
428 bheads = [b for b in bheads if b not in reachable]
430 newbheads.insert(0, latest)
429 newbheads.insert(0, latest)
431 bheads.extend(newbheads)
430 bheads.extend(newbheads)
432 partial[branch] = bheads
431 partial[branch] = bheads
433
432
434 def lookup(self, key):
433 def lookup(self, key):
435 if isinstance(key, int):
434 if isinstance(key, int):
436 return self.changelog.node(key)
435 return self.changelog.node(key)
437 elif key == '.':
436 elif key == '.':
438 return self.dirstate.parents()[0]
437 return self.dirstate.parents()[0]
439 elif key == 'null':
438 elif key == 'null':
440 return nullid
439 return nullid
441 elif key == 'tip':
440 elif key == 'tip':
442 return self.changelog.tip()
441 return self.changelog.tip()
443 n = self.changelog._match(key)
442 n = self.changelog._match(key)
444 if n:
443 if n:
445 return n
444 return n
446 if key in self.tags():
445 if key in self.tags():
447 return self.tags()[key]
446 return self.tags()[key]
448 if key in self.branchtags():
447 if key in self.branchtags():
449 return self.branchtags()[key]
448 return self.branchtags()[key]
450 n = self.changelog._partialmatch(key)
449 n = self.changelog._partialmatch(key)
451 if n:
450 if n:
452 return n
451 return n
453
452
454 # can't find key, check if it might have come from damaged dirstate
453 # can't find key, check if it might have come from damaged dirstate
455 if key in self.dirstate.parents():
454 if key in self.dirstate.parents():
456 raise error.Abort(_("working directory has unknown parent '%s'!")
455 raise error.Abort(_("working directory has unknown parent '%s'!")
457 % short(key))
456 % short(key))
458 try:
457 try:
459 if len(key) == 20:
458 if len(key) == 20:
460 key = hex(key)
459 key = hex(key)
461 except:
460 except:
462 pass
461 pass
463 raise error.RepoLookupError(_("unknown revision '%s'") % key)
462 raise error.RepoLookupError(_("unknown revision '%s'") % key)
464
463
465 def local(self):
464 def local(self):
466 return True
465 return True
467
466
468 def join(self, f):
467 def join(self, f):
469 return os.path.join(self.path, f)
468 return os.path.join(self.path, f)
470
469
471 def wjoin(self, f):
470 def wjoin(self, f):
472 return os.path.join(self.root, f)
471 return os.path.join(self.root, f)
473
472
474 def rjoin(self, f):
473 def rjoin(self, f):
475 return os.path.join(self.root, util.pconvert(f))
474 return os.path.join(self.root, util.pconvert(f))
476
475
477 def file(self, f):
476 def file(self, f):
478 if f[0] == '/':
477 if f[0] == '/':
479 f = f[1:]
478 f = f[1:]
480 return filelog.filelog(self.sopener, f)
479 return filelog.filelog(self.sopener, f)
481
480
482 def changectx(self, changeid):
481 def changectx(self, changeid):
483 return self[changeid]
482 return self[changeid]
484
483
485 def parents(self, changeid=None):
484 def parents(self, changeid=None):
486 '''get list of changectxs for parents of changeid'''
485 '''get list of changectxs for parents of changeid'''
487 return self[changeid].parents()
486 return self[changeid].parents()
488
487
489 def filectx(self, path, changeid=None, fileid=None):
488 def filectx(self, path, changeid=None, fileid=None):
490 """changeid can be a changeset revision, node, or tag.
489 """changeid can be a changeset revision, node, or tag.
491 fileid can be a file revision or node."""
490 fileid can be a file revision or node."""
492 return context.filectx(self, path, changeid, fileid)
491 return context.filectx(self, path, changeid, fileid)
493
492
494 def getcwd(self):
493 def getcwd(self):
495 return self.dirstate.getcwd()
494 return self.dirstate.getcwd()
496
495
497 def pathto(self, f, cwd=None):
496 def pathto(self, f, cwd=None):
498 return self.dirstate.pathto(f, cwd)
497 return self.dirstate.pathto(f, cwd)
499
498
500 def wfile(self, f, mode='r'):
499 def wfile(self, f, mode='r'):
501 return self.wopener(f, mode)
500 return self.wopener(f, mode)
502
501
503 def _link(self, f):
502 def _link(self, f):
504 return os.path.islink(self.wjoin(f))
503 return os.path.islink(self.wjoin(f))
505
504
506 def _filter(self, filter, filename, data):
505 def _filter(self, filter, filename, data):
507 if filter not in self.filterpats:
506 if filter not in self.filterpats:
508 l = []
507 l = []
509 for pat, cmd in self.ui.configitems(filter):
508 for pat, cmd in self.ui.configitems(filter):
510 if cmd == '!':
509 if cmd == '!':
511 continue
510 continue
512 mf = match_.match(self.root, '', [pat])
511 mf = match_.match(self.root, '', [pat])
513 fn = None
512 fn = None
514 params = cmd
513 params = cmd
515 for name, filterfn in self._datafilters.iteritems():
514 for name, filterfn in self._datafilters.iteritems():
516 if cmd.startswith(name):
515 if cmd.startswith(name):
517 fn = filterfn
516 fn = filterfn
518 params = cmd[len(name):].lstrip()
517 params = cmd[len(name):].lstrip()
519 break
518 break
520 if not fn:
519 if not fn:
521 fn = lambda s, c, **kwargs: util.filter(s, c)
520 fn = lambda s, c, **kwargs: util.filter(s, c)
522 # Wrap old filters not supporting keyword arguments
521 # Wrap old filters not supporting keyword arguments
523 if not inspect.getargspec(fn)[2]:
522 if not inspect.getargspec(fn)[2]:
524 oldfn = fn
523 oldfn = fn
525 fn = lambda s, c, **kwargs: oldfn(s, c)
524 fn = lambda s, c, **kwargs: oldfn(s, c)
526 l.append((mf, fn, params))
525 l.append((mf, fn, params))
527 self.filterpats[filter] = l
526 self.filterpats[filter] = l
528
527
529 for mf, fn, cmd in self.filterpats[filter]:
528 for mf, fn, cmd in self.filterpats[filter]:
530 if mf(filename):
529 if mf(filename):
531 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
530 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
532 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
531 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
533 break
532 break
534
533
535 return data
534 return data
536
535
537 def adddatafilter(self, name, filter):
536 def adddatafilter(self, name, filter):
538 self._datafilters[name] = filter
537 self._datafilters[name] = filter
539
538
540 def wread(self, filename):
539 def wread(self, filename):
541 if self._link(filename):
540 if self._link(filename):
542 data = os.readlink(self.wjoin(filename))
541 data = os.readlink(self.wjoin(filename))
543 else:
542 else:
544 data = self.wopener(filename, 'r').read()
543 data = self.wopener(filename, 'r').read()
545 return self._filter("encode", filename, data)
544 return self._filter("encode", filename, data)
546
545
547 def wwrite(self, filename, data, flags):
546 def wwrite(self, filename, data, flags):
548 data = self._filter("decode", filename, data)
547 data = self._filter("decode", filename, data)
549 try:
548 try:
550 os.unlink(self.wjoin(filename))
549 os.unlink(self.wjoin(filename))
551 except OSError:
550 except OSError:
552 pass
551 pass
553 if 'l' in flags:
552 if 'l' in flags:
554 self.wopener.symlink(data, filename)
553 self.wopener.symlink(data, filename)
555 else:
554 else:
556 self.wopener(filename, 'w').write(data)
555 self.wopener(filename, 'w').write(data)
557 if 'x' in flags:
556 if 'x' in flags:
558 util.set_flags(self.wjoin(filename), False, True)
557 util.set_flags(self.wjoin(filename), False, True)
559
558
560 def wwritedata(self, filename, data):
559 def wwritedata(self, filename, data):
561 return self._filter("decode", filename, data)
560 return self._filter("decode", filename, data)
562
561
563 def transaction(self):
562 def transaction(self):
564 tr = self._transref and self._transref() or None
563 tr = self._transref and self._transref() or None
565 if tr and tr.running():
564 if tr and tr.running():
566 return tr.nest()
565 return tr.nest()
567
566
568 # abort here if the journal already exists
567 # abort here if the journal already exists
569 if os.path.exists(self.sjoin("journal")):
568 if os.path.exists(self.sjoin("journal")):
570 raise error.RepoError(_("journal already exists - run hg recover"))
569 raise error.RepoError(_("journal already exists - run hg recover"))
571
570
572 # save dirstate for rollback
571 # save dirstate for rollback
573 try:
572 try:
574 ds = self.opener("dirstate").read()
573 ds = self.opener("dirstate").read()
575 except IOError:
574 except IOError:
576 ds = ""
575 ds = ""
577 self.opener("journal.dirstate", "w").write(ds)
576 self.opener("journal.dirstate", "w").write(ds)
578 self.opener("journal.branch", "w").write(self.dirstate.branch())
577 self.opener("journal.branch", "w").write(self.dirstate.branch())
579
578
580 renames = [(self.sjoin("journal"), self.sjoin("undo")),
579 renames = [(self.sjoin("journal"), self.sjoin("undo")),
581 (self.join("journal.dirstate"), self.join("undo.dirstate")),
580 (self.join("journal.dirstate"), self.join("undo.dirstate")),
582 (self.join("journal.branch"), self.join("undo.branch"))]
581 (self.join("journal.branch"), self.join("undo.branch"))]
583 tr = transaction.transaction(self.ui.warn, self.sopener,
582 tr = transaction.transaction(self.ui.warn, self.sopener,
584 self.sjoin("journal"),
583 self.sjoin("journal"),
585 aftertrans(renames),
584 aftertrans(renames),
586 self.store.createmode)
585 self.store.createmode)
587 self._transref = weakref.ref(tr)
586 self._transref = weakref.ref(tr)
588 return tr
587 return tr
589
588
590 def recover(self):
589 def recover(self):
591 lock = self.lock()
590 lock = self.lock()
592 try:
591 try:
593 if os.path.exists(self.sjoin("journal")):
592 if os.path.exists(self.sjoin("journal")):
594 self.ui.status(_("rolling back interrupted transaction\n"))
593 self.ui.status(_("rolling back interrupted transaction\n"))
595 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
594 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
596 self.invalidate()
595 self.invalidate()
597 return True
596 return True
598 else:
597 else:
599 self.ui.warn(_("no interrupted transaction available\n"))
598 self.ui.warn(_("no interrupted transaction available\n"))
600 return False
599 return False
601 finally:
600 finally:
602 lock.release()
601 lock.release()
603
602
604 def rollback(self):
603 def rollback(self):
605 wlock = lock = None
604 wlock = lock = None
606 try:
605 try:
607 wlock = self.wlock()
606 wlock = self.wlock()
608 lock = self.lock()
607 lock = self.lock()
609 if os.path.exists(self.sjoin("undo")):
608 if os.path.exists(self.sjoin("undo")):
610 self.ui.status(_("rolling back last transaction\n"))
609 self.ui.status(_("rolling back last transaction\n"))
611 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
610 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
612 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
611 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
613 try:
612 try:
614 branch = self.opener("undo.branch").read()
613 branch = self.opener("undo.branch").read()
615 self.dirstate.setbranch(branch)
614 self.dirstate.setbranch(branch)
616 except IOError:
615 except IOError:
617 self.ui.warn(_("Named branch could not be reset, "
616 self.ui.warn(_("Named branch could not be reset, "
618 "current branch still is: %s\n")
617 "current branch still is: %s\n")
619 % encoding.tolocal(self.dirstate.branch()))
618 % encoding.tolocal(self.dirstate.branch()))
620 self.invalidate()
619 self.invalidate()
621 self.dirstate.invalidate()
620 self.dirstate.invalidate()
622 self.destroyed()
621 self.destroyed()
623 else:
622 else:
624 self.ui.warn(_("no rollback information available\n"))
623 self.ui.warn(_("no rollback information available\n"))
625 finally:
624 finally:
626 release(lock, wlock)
625 release(lock, wlock)
627
626
628 def invalidate(self):
627 def invalidate(self):
629 for a in "changelog manifest".split():
628 for a in "changelog manifest".split():
630 if a in self.__dict__:
629 if a in self.__dict__:
631 delattr(self, a)
630 delattr(self, a)
632 self._tags = None
631 self._tags = None
633 self._tagtypes = None
632 self._tagtypes = None
634 self.nodetagscache = None
633 self.nodetagscache = None
635 self.branchcache = None
634 self._branchcache = None # in UTF-8
636 self._ubranchcache = None
637 self._branchcachetip = None
635 self._branchcachetip = None
638
636
639 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
637 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
640 try:
638 try:
641 l = lock.lock(lockname, 0, releasefn, desc=desc)
639 l = lock.lock(lockname, 0, releasefn, desc=desc)
642 except error.LockHeld, inst:
640 except error.LockHeld, inst:
643 if not wait:
641 if not wait:
644 raise
642 raise
645 self.ui.warn(_("waiting for lock on %s held by %r\n") %
643 self.ui.warn(_("waiting for lock on %s held by %r\n") %
646 (desc, inst.locker))
644 (desc, inst.locker))
647 # default to 600 seconds timeout
645 # default to 600 seconds timeout
648 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
646 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
649 releasefn, desc=desc)
647 releasefn, desc=desc)
650 if acquirefn:
648 if acquirefn:
651 acquirefn()
649 acquirefn()
652 return l
650 return l
653
651
654 def lock(self, wait=True):
652 def lock(self, wait=True):
655 '''Lock the repository store (.hg/store) and return a weak reference
653 '''Lock the repository store (.hg/store) and return a weak reference
656 to the lock. Use this before modifying the store (e.g. committing or
654 to the lock. Use this before modifying the store (e.g. committing or
657 stripping). If you are opening a transaction, get a lock as well.)'''
655 stripping). If you are opening a transaction, get a lock as well.)'''
658 l = self._lockref and self._lockref()
656 l = self._lockref and self._lockref()
659 if l is not None and l.held:
657 if l is not None and l.held:
660 l.lock()
658 l.lock()
661 return l
659 return l
662
660
663 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
661 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
664 _('repository %s') % self.origroot)
662 _('repository %s') % self.origroot)
665 self._lockref = weakref.ref(l)
663 self._lockref = weakref.ref(l)
666 return l
664 return l
667
665
668 def wlock(self, wait=True):
666 def wlock(self, wait=True):
669 '''Lock the non-store parts of the repository (everything under
667 '''Lock the non-store parts of the repository (everything under
670 .hg except .hg/store) and return a weak reference to the lock.
668 .hg except .hg/store) and return a weak reference to the lock.
671 Use this before modifying files in .hg.'''
669 Use this before modifying files in .hg.'''
672 l = self._wlockref and self._wlockref()
670 l = self._wlockref and self._wlockref()
673 if l is not None and l.held:
671 if l is not None and l.held:
674 l.lock()
672 l.lock()
675 return l
673 return l
676
674
677 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
675 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
678 self.dirstate.invalidate, _('working directory of %s') %
676 self.dirstate.invalidate, _('working directory of %s') %
679 self.origroot)
677 self.origroot)
680 self._wlockref = weakref.ref(l)
678 self._wlockref = weakref.ref(l)
681 return l
679 return l
682
680
683 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
681 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
684 """
682 """
685 commit an individual file as part of a larger transaction
683 commit an individual file as part of a larger transaction
686 """
684 """
687
685
688 fname = fctx.path()
686 fname = fctx.path()
689 text = fctx.data()
687 text = fctx.data()
690 flog = self.file(fname)
688 flog = self.file(fname)
691 fparent1 = manifest1.get(fname, nullid)
689 fparent1 = manifest1.get(fname, nullid)
692 fparent2 = fparent2o = manifest2.get(fname, nullid)
690 fparent2 = fparent2o = manifest2.get(fname, nullid)
693
691
694 meta = {}
692 meta = {}
695 copy = fctx.renamed()
693 copy = fctx.renamed()
696 if copy and copy[0] != fname:
694 if copy and copy[0] != fname:
697 # Mark the new revision of this file as a copy of another
695 # Mark the new revision of this file as a copy of another
698 # file. This copy data will effectively act as a parent
696 # file. This copy data will effectively act as a parent
699 # of this new revision. If this is a merge, the first
697 # of this new revision. If this is a merge, the first
700 # parent will be the nullid (meaning "look up the copy data")
698 # parent will be the nullid (meaning "look up the copy data")
701 # and the second one will be the other parent. For example:
699 # and the second one will be the other parent. For example:
702 #
700 #
703 # 0 --- 1 --- 3 rev1 changes file foo
701 # 0 --- 1 --- 3 rev1 changes file foo
704 # \ / rev2 renames foo to bar and changes it
702 # \ / rev2 renames foo to bar and changes it
705 # \- 2 -/ rev3 should have bar with all changes and
703 # \- 2 -/ rev3 should have bar with all changes and
706 # should record that bar descends from
704 # should record that bar descends from
707 # bar in rev2 and foo in rev1
705 # bar in rev2 and foo in rev1
708 #
706 #
709 # this allows this merge to succeed:
707 # this allows this merge to succeed:
710 #
708 #
711 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
709 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
712 # \ / merging rev3 and rev4 should use bar@rev2
710 # \ / merging rev3 and rev4 should use bar@rev2
713 # \- 2 --- 4 as the merge base
711 # \- 2 --- 4 as the merge base
714 #
712 #
715
713
716 cfname = copy[0]
714 cfname = copy[0]
717 crev = manifest1.get(cfname)
715 crev = manifest1.get(cfname)
718 newfparent = fparent2
716 newfparent = fparent2
719
717
720 if manifest2: # branch merge
718 if manifest2: # branch merge
721 if fparent2 == nullid or crev is None: # copied on remote side
719 if fparent2 == nullid or crev is None: # copied on remote side
722 if cfname in manifest2:
720 if cfname in manifest2:
723 crev = manifest2[cfname]
721 crev = manifest2[cfname]
724 newfparent = fparent1
722 newfparent = fparent1
725
723
726 # find source in nearest ancestor if we've lost track
724 # find source in nearest ancestor if we've lost track
727 if not crev:
725 if not crev:
728 self.ui.debug(" %s: searching for copy revision for %s\n" %
726 self.ui.debug(" %s: searching for copy revision for %s\n" %
729 (fname, cfname))
727 (fname, cfname))
730 for ancestor in self['.'].ancestors():
728 for ancestor in self['.'].ancestors():
731 if cfname in ancestor:
729 if cfname in ancestor:
732 crev = ancestor[cfname].filenode()
730 crev = ancestor[cfname].filenode()
733 break
731 break
734
732
735 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
733 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
736 meta["copy"] = cfname
734 meta["copy"] = cfname
737 meta["copyrev"] = hex(crev)
735 meta["copyrev"] = hex(crev)
738 fparent1, fparent2 = nullid, newfparent
736 fparent1, fparent2 = nullid, newfparent
739 elif fparent2 != nullid:
737 elif fparent2 != nullid:
740 # is one parent an ancestor of the other?
738 # is one parent an ancestor of the other?
741 fparentancestor = flog.ancestor(fparent1, fparent2)
739 fparentancestor = flog.ancestor(fparent1, fparent2)
742 if fparentancestor == fparent1:
740 if fparentancestor == fparent1:
743 fparent1, fparent2 = fparent2, nullid
741 fparent1, fparent2 = fparent2, nullid
744 elif fparentancestor == fparent2:
742 elif fparentancestor == fparent2:
745 fparent2 = nullid
743 fparent2 = nullid
746
744
747 # is the file changed?
745 # is the file changed?
748 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
746 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
749 changelist.append(fname)
747 changelist.append(fname)
750 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
748 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
751
749
752 # are just the flags changed during merge?
750 # are just the flags changed during merge?
753 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
751 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
754 changelist.append(fname)
752 changelist.append(fname)
755
753
756 return fparent1
754 return fparent1
757
755
758 def commit(self, text="", user=None, date=None, match=None, force=False,
756 def commit(self, text="", user=None, date=None, match=None, force=False,
759 editor=False, extra={}):
757 editor=False, extra={}):
760 """Add a new revision to current repository.
758 """Add a new revision to current repository.
761
759
762 Revision information is gathered from the working directory,
760 Revision information is gathered from the working directory,
763 match can be used to filter the committed files. If editor is
761 match can be used to filter the committed files. If editor is
764 supplied, it is called to get a commit message.
762 supplied, it is called to get a commit message.
765 """
763 """
766
764
767 def fail(f, msg):
765 def fail(f, msg):
768 raise util.Abort('%s: %s' % (f, msg))
766 raise util.Abort('%s: %s' % (f, msg))
769
767
770 if not match:
768 if not match:
771 match = match_.always(self.root, '')
769 match = match_.always(self.root, '')
772
770
773 if not force:
771 if not force:
774 vdirs = []
772 vdirs = []
775 match.dir = vdirs.append
773 match.dir = vdirs.append
776 match.bad = fail
774 match.bad = fail
777
775
778 wlock = self.wlock()
776 wlock = self.wlock()
779 try:
777 try:
780 p1, p2 = self.dirstate.parents()
778 p1, p2 = self.dirstate.parents()
781 wctx = self[None]
779 wctx = self[None]
782
780
783 if (not force and p2 != nullid and match and
781 if (not force and p2 != nullid and match and
784 (match.files() or match.anypats())):
782 (match.files() or match.anypats())):
785 raise util.Abort(_('cannot partially commit a merge '
783 raise util.Abort(_('cannot partially commit a merge '
786 '(do not specify files or patterns)'))
784 '(do not specify files or patterns)'))
787
785
788 changes = self.status(match=match, clean=force)
786 changes = self.status(match=match, clean=force)
789 if force:
787 if force:
790 changes[0].extend(changes[6]) # mq may commit unchanged files
788 changes[0].extend(changes[6]) # mq may commit unchanged files
791
789
792 # check subrepos
790 # check subrepos
793 subs = []
791 subs = []
794 for s in wctx.substate:
792 for s in wctx.substate:
795 if match(s) and wctx.sub(s).dirty():
793 if match(s) and wctx.sub(s).dirty():
796 subs.append(s)
794 subs.append(s)
797 if subs and '.hgsubstate' not in changes[0]:
795 if subs and '.hgsubstate' not in changes[0]:
798 changes[0].insert(0, '.hgsubstate')
796 changes[0].insert(0, '.hgsubstate')
799
797
800 # make sure all explicit patterns are matched
798 # make sure all explicit patterns are matched
801 if not force and match.files():
799 if not force and match.files():
802 matched = set(changes[0] + changes[1] + changes[2])
800 matched = set(changes[0] + changes[1] + changes[2])
803
801
804 for f in match.files():
802 for f in match.files():
805 if f == '.' or f in matched or f in wctx.substate:
803 if f == '.' or f in matched or f in wctx.substate:
806 continue
804 continue
807 if f in changes[3]: # missing
805 if f in changes[3]: # missing
808 fail(f, _('file not found!'))
806 fail(f, _('file not found!'))
809 if f in vdirs: # visited directory
807 if f in vdirs: # visited directory
810 d = f + '/'
808 d = f + '/'
811 for mf in matched:
809 for mf in matched:
812 if mf.startswith(d):
810 if mf.startswith(d):
813 break
811 break
814 else:
812 else:
815 fail(f, _("no match under directory!"))
813 fail(f, _("no match under directory!"))
816 elif f not in self.dirstate:
814 elif f not in self.dirstate:
817 fail(f, _("file not tracked!"))
815 fail(f, _("file not tracked!"))
818
816
819 if (not force and not extra.get("close") and p2 == nullid
817 if (not force and not extra.get("close") and p2 == nullid
820 and not (changes[0] or changes[1] or changes[2])
818 and not (changes[0] or changes[1] or changes[2])
821 and self[None].branch() == self['.'].branch()):
819 and self[None].branch() == self['.'].branch()):
822 return None
820 return None
823
821
824 ms = merge_.mergestate(self)
822 ms = merge_.mergestate(self)
825 for f in changes[0]:
823 for f in changes[0]:
826 if f in ms and ms[f] == 'u':
824 if f in ms and ms[f] == 'u':
827 raise util.Abort(_("unresolved merge conflicts "
825 raise util.Abort(_("unresolved merge conflicts "
828 "(see hg resolve)"))
826 "(see hg resolve)"))
829
827
830 cctx = context.workingctx(self, (p1, p2), text, user, date,
828 cctx = context.workingctx(self, (p1, p2), text, user, date,
831 extra, changes)
829 extra, changes)
832 if editor:
830 if editor:
833 cctx._text = editor(self, cctx, subs)
831 cctx._text = editor(self, cctx, subs)
834
832
835 # commit subs
833 # commit subs
836 if subs:
834 if subs:
837 state = wctx.substate.copy()
835 state = wctx.substate.copy()
838 for s in subs:
836 for s in subs:
839 self.ui.status(_('committing subrepository %s\n') % s)
837 self.ui.status(_('committing subrepository %s\n') % s)
840 sr = wctx.sub(s).commit(cctx._text, user, date)
838 sr = wctx.sub(s).commit(cctx._text, user, date)
841 state[s] = (state[s][0], sr)
839 state[s] = (state[s][0], sr)
842 subrepo.writestate(self, state)
840 subrepo.writestate(self, state)
843
841
844 ret = self.commitctx(cctx, True)
842 ret = self.commitctx(cctx, True)
845
843
846 # update dirstate and mergestate
844 # update dirstate and mergestate
847 for f in changes[0] + changes[1]:
845 for f in changes[0] + changes[1]:
848 self.dirstate.normal(f)
846 self.dirstate.normal(f)
849 for f in changes[2]:
847 for f in changes[2]:
850 self.dirstate.forget(f)
848 self.dirstate.forget(f)
851 self.dirstate.setparents(ret)
849 self.dirstate.setparents(ret)
852 ms.reset()
850 ms.reset()
853
851
854 return ret
852 return ret
855
853
856 finally:
854 finally:
857 wlock.release()
855 wlock.release()
858
856
859 def commitctx(self, ctx, error=False):
857 def commitctx(self, ctx, error=False):
860 """Add a new revision to current repository.
858 """Add a new revision to current repository.
861
859
862 Revision information is passed via the context argument.
860 Revision information is passed via the context argument.
863 """
861 """
864
862
865 tr = lock = None
863 tr = lock = None
866 removed = ctx.removed()
864 removed = ctx.removed()
867 p1, p2 = ctx.p1(), ctx.p2()
865 p1, p2 = ctx.p1(), ctx.p2()
868 m1 = p1.manifest().copy()
866 m1 = p1.manifest().copy()
869 m2 = p2.manifest()
867 m2 = p2.manifest()
870 user = ctx.user()
868 user = ctx.user()
871
869
872 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
870 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
873 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
871 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
874
872
875 lock = self.lock()
873 lock = self.lock()
876 try:
874 try:
877 tr = self.transaction()
875 tr = self.transaction()
878 trp = weakref.proxy(tr)
876 trp = weakref.proxy(tr)
879
877
880 # check in files
878 # check in files
881 new = {}
879 new = {}
882 changed = []
880 changed = []
883 linkrev = len(self)
881 linkrev = len(self)
884 for f in sorted(ctx.modified() + ctx.added()):
882 for f in sorted(ctx.modified() + ctx.added()):
885 self.ui.note(f + "\n")
883 self.ui.note(f + "\n")
886 try:
884 try:
887 fctx = ctx[f]
885 fctx = ctx[f]
888 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
886 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
889 changed)
887 changed)
890 m1.set(f, fctx.flags())
888 m1.set(f, fctx.flags())
891 except (OSError, IOError):
889 except (OSError, IOError):
892 if error:
890 if error:
893 self.ui.warn(_("trouble committing %s!\n") % f)
891 self.ui.warn(_("trouble committing %s!\n") % f)
894 raise
892 raise
895 else:
893 else:
896 removed.append(f)
894 removed.append(f)
897
895
898 # update manifest
896 # update manifest
899 m1.update(new)
897 m1.update(new)
900 removed = [f for f in sorted(removed) if f in m1 or f in m2]
898 removed = [f for f in sorted(removed) if f in m1 or f in m2]
901 drop = [f for f in removed if f in m1]
899 drop = [f for f in removed if f in m1]
902 for f in drop:
900 for f in drop:
903 del m1[f]
901 del m1[f]
904 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
902 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
905 p2.manifestnode(), (new, drop))
903 p2.manifestnode(), (new, drop))
906
904
907 # update changelog
905 # update changelog
908 self.changelog.delayupdate()
906 self.changelog.delayupdate()
909 n = self.changelog.add(mn, changed + removed, ctx.description(),
907 n = self.changelog.add(mn, changed + removed, ctx.description(),
910 trp, p1.node(), p2.node(),
908 trp, p1.node(), p2.node(),
911 user, ctx.date(), ctx.extra().copy())
909 user, ctx.date(), ctx.extra().copy())
912 p = lambda: self.changelog.writepending() and self.root or ""
910 p = lambda: self.changelog.writepending() and self.root or ""
913 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
911 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
914 parent2=xp2, pending=p)
912 parent2=xp2, pending=p)
915 self.changelog.finalize(trp)
913 self.changelog.finalize(trp)
916 tr.close()
914 tr.close()
917
915
918 if self.branchcache:
916 if self._branchcache:
919 self.branchtags()
917 self.branchtags()
920
918
921 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
919 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
922 return n
920 return n
923 finally:
921 finally:
924 del tr
922 del tr
925 lock.release()
923 lock.release()
926
924
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        tags_.findglobaltags(self.ui, self, {}, {})

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

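A note on the fixup branch above: writing the freshly-verified clean files back to the dirstate is purely an optimization, so status() takes the working-directory lock without waiting and silently skips the write-back when the lock is busy. Below is a minimal sketch of that opportunistic-locking shape, using threading.Lock as a stand-in for the repository wlock; the names are illustrative and not part of localrepo.

import threading

wlock = threading.Lock()          # stand-in for repo.wlock()

def writeback_if_free(fixup, mark_normal):
    # best-effort: update only when the lock is free, never block
    if not wlock.acquire(False):  # non-blocking, like wlock(False)
        return False              # analogue of catching error.LockError
    try:
        for f in fixup:
            mark_normal(f)
        return True
    finally:
        wlock.release()

assert writeback_if_free(['a', 'b'], lambda f: None)
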
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

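heads() uses a small decorate-sort-undecorate trick: negating each head's revision number lets a plain ascending sort yield newest-first output. A standalone illustration with invented (node, rev) pairs:

heads = [('n1', 2), ('n2', 7), ('n3', 5)]        # fake (node, rev) pairs
decorated = sorted((-rev, node) for node, rev in heads)
assert [n for (r, n) in decorated] == ['n2', 'n3', 'n1']  # newest first
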
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

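branches() reduces each starting node to the linear segment it sits on: it follows first parents until it reaches a merge or a root, then reports (start, segment base, p1, p2). A toy walk over an invented parent map, with None standing in for nullid:

parents = {'d': ('c', None), 'c': ('b', None),
           'b': ('a', 'x'), 'a': (None, None), 'x': (None, None)}

def segment(t):
    n = t
    while True:
        p1, p2 = parents[n]
        if p2 is not None or p1 is None:   # merge or root ends the segment
            return (t, n, p1, p2)
        n = p1

assert segment('d') == ('d', 'b', 'a', 'x')    # stops at the merge 'b'
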
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

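between() deliberately does not return every node between top and bottom: it samples nodes at distances 1, 2, 4, 8, ... from the top, so replies stay logarithmic in branch length and give the discovery binary search its probe points. A self-contained sketch over a fake linear branch:

def sample(branch):               # branch[0] is the top, ancestors follow
    l, f = [], 1
    for i, n in enumerate(branch):
        if i == f:                # record distances 1, 2, 4, 8, ...
            l.append(n)
            f *= 2
    return l

branch = ['n%d' % i for i in range(20)]
assert sample(branch) == ['n1', 'n2', 'n4', 'n8', 'n16']
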
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug("request %d: %s\n" %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug("found new branch changeset %s\n" %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads

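The narrowing loop above walks each sampled list until it hits the first node already known locally: if the gap f is at most 2 the boundary is pinned and the previous probe becomes a fetch root, otherwise the (previous, known) pair is queued as a smaller range for the next round. A compressed, single-range sketch of that decision (it omits the l.append(n[1]) bookkeeping of the real loop):

known = set(['n8', 'n16'])             # pretend these exist locally

def narrow(top, samples):
    p, f = top, 1
    for i in samples:
        if i in known:
            if f <= 2:
                return ('fetch', p)    # boundary found next to p
            return ('search', (p, i))  # re-search the smaller range
        p, f = i, f * 2
    return ('fetch', p)

assert narrow('n0', ['n1', 'n2', 'n4', 'n8']) == ('search', ('n4', 'n8'))
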
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
        self.findincoming(remote, base, heads, force=force)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

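The pruning in findoutgoing() boils down to: delete everything reachable from the common bases by chasing parent links, then report survivors whose parents were all deleted as the outgoing roots. A toy rendition on an invented five-node history:

parents = {'a': [], 'b': ['a'], 'c': ['b'], 'd': ['c'], 'e': ['c']}
remain = set(parents)
remove = ['b']                    # pretend 'b' is a common base
while remove:
    n = remove.pop(0)
    if n in remain:
        remain.remove(n)
        remove.extend(parents[n])
roots = [n for n in sorted(remain)
         if all(p not in remain for p in parents[n])]
assert remain == set(['c', 'd', 'e']) and roots == ['c']
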
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

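The choice made in push() is a plain capability probe: prefer the unbundle protocol whenever the server advertises it, fall back to locking the remote and feeding it a changegroup otherwise. A sketch, with a hypothetical capability set:

def choose_push(caps):
    if 'unbundle' in caps:
        return 'unbundle'          # server applies the bundle itself
    return 'addchangegroup'        # requires locking the remote ourselves

assert choose_push(set(['lookup', 'unbundle'])) == 'unbundle'
assert choose_push(set(['lookup'])) == 'addchangegroup'
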
    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.
        '''
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

        def checkbranch(lheads, rheads, updatelb):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelb: outgoing local branch bases
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                # add local heads involved in the push
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelb]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # add heads we don't have or that are not involved in the push
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[updatelb[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelb = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelb:
                            continue
                        if not checkbranch(lheads, rheads, updatelb):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

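At its core the checkbranch() guard compares head counts: a push is refused when it would leave a branch with more heads than the remote already has. The real check also folds in remote heads untouched by the push; this stripped-down sketch with invented branch-to-heads maps shows only the counting step:

local = {'default': ['h1', 'h2'], 'stable': ['s1']}
remote = {'default': ['h1'], 'stable': ['s1']}
offending = [b for b, lheads in sorted(local.items())
             if len(lheads) > len(remote.get(b, []))]
assert offending == ['default']    # this push would need -f
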
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(key=revlog.rev)
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

1760 # A function generating function that sets up the initial environment
1758 # A function generating function that sets up the initial environment
1761 # the inner function.
1759 # the inner function.
1762 def filenode_collector(changedfiles):
1760 def filenode_collector(changedfiles):
1763 next_rev = [0]
1761 next_rev = [0]
1764 # This gathers information from each manifestnode included in the
1762 # This gathers information from each manifestnode included in the
1765 # changegroup about which filenodes the manifest node references
1763 # changegroup about which filenodes the manifest node references
1766 # so we can include those in the changegroup too.
1764 # so we can include those in the changegroup too.
1767 #
1765 #
1768 # It also remembers which changenode each filenode belongs to. It
1766 # It also remembers which changenode each filenode belongs to. It
1769 # does this by assuming the a filenode belongs to the changenode
1767 # does this by assuming the a filenode belongs to the changenode
1770 # the first manifest that references it belongs to.
1768 # the first manifest that references it belongs to.
1771 def collect_msng_filenodes(mnfstnode):
1769 def collect_msng_filenodes(mnfstnode):
1772 r = mnfst.rev(mnfstnode)
1770 r = mnfst.rev(mnfstnode)
1773 if r == next_rev[0]:
1771 if r == next_rev[0]:
1774 # If the last rev we looked at was the one just previous,
1772 # If the last rev we looked at was the one just previous,
1775 # we only need to see a diff.
1773 # we only need to see a diff.
1776 deltamf = mnfst.readdelta(mnfstnode)
1774 deltamf = mnfst.readdelta(mnfstnode)
1777 # For each line in the delta
1775 # For each line in the delta
1778 for f, fnode in deltamf.iteritems():
1776 for f, fnode in deltamf.iteritems():
1779 f = changedfiles.get(f, None)
1777 f = changedfiles.get(f, None)
1780 # And if the file is in the list of files we care
1778 # And if the file is in the list of files we care
1781 # about.
1779 # about.
1782 if f is not None:
1780 if f is not None:
1783 # Get the changenode this manifest belongs to
1781 # Get the changenode this manifest belongs to
1784 clnode = msng_mnfst_set[mnfstnode]
1782 clnode = msng_mnfst_set[mnfstnode]
1785 # Create the set of filenodes for the file if
1783 # Create the set of filenodes for the file if
1786 # there isn't one already.
1784 # there isn't one already.
1787 ndset = msng_filenode_set.setdefault(f, {})
1785 ndset = msng_filenode_set.setdefault(f, {})
1788 # And set the filenode's changelog node to the
1786 # And set the filenode's changelog node to the
1789 # manifest's if it hasn't been set already.
1787 # manifest's if it hasn't been set already.
1790 ndset.setdefault(fnode, clnode)
1788 ndset.setdefault(fnode, clnode)
1791 else:
1789 else:
1792 # Otherwise we need a full manifest.
1790 # Otherwise we need a full manifest.
1793 m = mnfst.read(mnfstnode)
1791 m = mnfst.read(mnfstnode)
1794 # For every file in we care about.
1792 # For every file in we care about.
1795 for f in changedfiles:
1793 for f in changedfiles:
1796 fnode = m.get(f, None)
1794 fnode = m.get(f, None)
1797 # If it's in the manifest
1795 # If it's in the manifest
1798 if fnode is not None:
1796 if fnode is not None:
1799 # See comments above.
1797 # See comments above.
1800 clnode = msng_mnfst_set[mnfstnode]
1798 clnode = msng_mnfst_set[mnfstnode]
1801 ndset = msng_filenode_set.setdefault(f, {})
1799 ndset = msng_filenode_set.setdefault(f, {})
1802 ndset.setdefault(fnode, clnode)
1800 ndset.setdefault(fnode, clnode)
1803 # Remember the revision we hope to see next.
1801 # Remember the revision we hope to see next.
1804 next_rev[0] = r + 1
1802 next_rev[0] = r + 1
1805 return collect_msng_filenodes
1803 return collect_msng_filenodes
1806
1804
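The next_rev = [0] one-element list above is the classic Python 2 substitute for nonlocal: the inner closure cannot rebind a name in the enclosing scope, so the mutable list serves as a writable cell. A minimal standalone sketch of the same idiom (make_counter and advance are illustrative names, not part of this module):

    # Sketch of the mutable-cell closure idiom used by
    # filenode_collector() above; hypothetical names, not Mercurial API.
    def make_counter():
        next_rev = [0]              # one-element list acts as a cell
        def advance(r):
            consecutive = (r == next_rev[0])
            next_rev[0] = r + 1     # mutate in place; rebinding would fail
            return consecutive
        return advance

    advance = make_counter()
    assert advance(0)        # expected revision: a delta suffices
    assert advance(1)        # still consecutive
    assert not advance(5)    # gap: a full manifest read is needed
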
        # We have a list of filenodes we think we need for a file; let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

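The rule prune_filenodes() applies is a one-line implication: a filenode whose linked changenode the recipient already has cannot be missing on their side (prune_parents(), called last, presumably extends the pruning to ancestors). A toy, self-contained sketch of the core step, with all names illustrative:

    # Toy version of the pruning rule: drop each 'missing' filenode
    # whose linked changenode is already known to the recipient.
    def prune(msngset, has_cl_set):
        hasset = set(n for n in msngset if msngset[n] in has_cl_set)
        for n in hasset:
            del msngset[n]

    missing = {'fnode1': 'cl-a', 'fnode2': 'cl-b'}
    prune(missing, set(['cl-a']))
    assert missing == {'fnode2': 'cl-b'}
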
        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Look up the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

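Everything gengroup() yields is framed the same way on the wire: a big-endian 4-byte length prefix that counts itself, followed by the payload, with a zero header terminating each group. A standalone sketch of that framing, assuming this reading of changegroup.chunkheader() and changegroup.closechunk():

    import struct

    # Sketch of the changegroup chunk framing, assuming chunkheader(n)
    # packs the payload length plus the 4 header bytes themselves.
    def chunkheader(datalen):
        return struct.pack(">l", datalen + 4)

    def closechunk():
        return struct.pack(">l", 0)

    def frame(chunks):
        for data in chunks:
            yield chunkheader(len(data)) + data
        yield closechunk()

    stream = b"".join(frame([b"abc", b"defgh"]))
    assert len(stream) == (4 + 3) + (4 + 5) + 4  # two chunks + terminator
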
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

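Both changegroup generators are handed to util.chunkbuffer, which presents the chunk iterator as a file-like object, so the transport layer only ever calls read(). A hedged sketch of the consuming side (drain and the 4096 block size are illustrative, not Mercurial API):

    # Hypothetical consumer: cg is the object returned by
    # _changegroup()/changegroupsubset(); read() yields the raw stream.
    def drain(cg, blocksize=4096):
        total = 0
        while True:
            data = cg.read(blocksize)
            if not data:        # empty string signals end of stream
                break
            total += len(data)
        return total
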
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

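The return convention above deliberately skips 0 once anything was added, so callers can both truth-test the result and recover the head delta. A small hedged decoder showing how a caller might interpret it (heads_delta is an illustrative name, not part of localrepo):

    # Illustrative decoder for addchangegroup()'s return convention:
    #   ret == 0 -> nothing changed; ret > 0 -> ret - 1 heads added
    #   (ret == 1 means the head count is unchanged);
    #   ret < 0 -> -ret - 1 heads removed.
    def heads_delta(ret):
        if ret == 0:
            return None         # nothing was added
        if ret > 0:
            return ret - 1
        return ret + 1          # negative delta: heads removed

    assert heads_delta(1) == 0
    assert heads_delta(3) == 2
    assert heads_delta(-2) == -1
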
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

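stream_in() parses a simple line-oriented protocol: a status-code line, then a "<files> <bytes>" summary line, then for each file a "<name>\0<size>" header followed by exactly size raw bytes. A toy, self-contained parser for that framing (Python 2, matching this module), assuming the layout described above is complete:

    import io

    # Toy parser for the stream_out framing consumed by stream_in().
    def parse_stream(fp):
        resp = int(fp.readline())
        assert resp == 0, 'server reported an error'
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for i in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            yield name, fp.read(int(size))  # exactly `size` bytes follow

    payload = '0\n1 3\nsomefile\x003\nabc'
    assert list(parse_stream(io.BytesIO(payload))) == [('somefile', 'abc')]
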
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

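The branch at the end of clone() is a capability negotiation: streaming is used only when the caller asked for it, no head subset was requested, and the server advertises 'stream'. A compact sketch of the same decision with a stub remote (all names here are illustrative):

    # Stub illustrating the stream-vs-pull decision made in clone().
    class FakeRemote(object):
        def capable(self, name):
            return name == 'stream'

    def choose_method(remote, heads, stream):
        if stream and not heads and remote.capable('stream'):
            return 'stream_in'
        return 'pull'

    assert choose_method(FakeRemote(), [], True) == 'stream_in'
    assert choose_method(FakeRemote(), ['somehead'], True) == 'pull'
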
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

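aftertrans() returns a plain callable closed over a snapshot of the rename list, precisely so a transaction can hold the callback without keeping a reference cycle back to the repository. A usage sketch in the context of this module (the journal/undo file names are an assumption, following Mercurial's usual undo convention):

    # Hypothetical use: snapshot the rename list now, run the renames
    # later via the returned callback, with no reference to the repo.
    renames = [('journal', 'undo'),
               ('journal.dirstate', 'undo.dirstate')]
    onclose = aftertrans(renames)
    # Calling onclose() would perform util.rename('journal', 'undo'),
    # then util.rename('journal.dirstate', 'undo.dirstate').
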
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True