transfer branchmap branch names over the wire in utf-8
Henrik Stuart
r9671:9471d9a9 default
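
This changeset makes the branchmap wire command exchange branch names in UTF-8 rather than in the server's local encoding, so that a client and a server configured with different encodings (here latin1 and utf-8) agree on the names. The hunks below add a test with its expected output and rework localrepo so the UTF-8 branch cache is what gets served. For orientation, the conversion at the wire boundary works roughly like the sketch below; the helper names and the exact line format are illustrative assumptions, not code from this patch.

# Illustrative sketch only -- hypothetical helpers, not part of this patch.
# Branch names travel as UTF-8; each side converts to/from its own local
# encoding with mercurial.encoding (used by the real hunks below).
from mercurial import encoding
from mercurial.node import bin, hex

def encodebranchmap(branchmap):
    # server side: local-encoding branch names -> UTF-8 text for the wire
    lines = []
    for branch, heads in branchmap.iteritems():
        lines.append('%s %s' % (encoding.fromlocal(branch),
                                ' '.join(hex(h) for h in heads)))
    return '\n'.join(lines)

def decodebranchmap(data):
    # client side: UTF-8 text from the wire -> local-encoding branch names
    branchmap = {}
    for line in data.splitlines():
        utf8branch, heads = line.split(' ', 1)
        branchmap[encoding.tolocal(utf8branch)] = map(bin, heads.split(' '))
    return branchmap
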
@@ -0,0 +1,23 b''
1 #!/bin/sh
2
3 hgserve()
4 {
5 hg serve -a localhost -p $HGPORT1 -d --pid-file=hg.pid -E errors.log -v $@ \
6 | sed -e 's/:[0-9][0-9]*//g' -e 's/http:\/\/[^/]*\//http:\/\/localhost\//'
7 cat hg.pid >> "$DAEMON_PIDS"
8 }
9
10 hg init a
11 hg --encoding utf-8 -R a branch æ
12 echo foo > a/foo
13 hg -R a ci -Am foo
14
15 hgserve -R a --config web.push_ssl=False --config web.allow_push=* --encoding latin1
16 hg clone http://localhost:$HGPORT1 b
17 hg --encoding utf-8 -R b log
18 echo bar >> b/foo
19 hg -R b ci -m bar
20 hg --encoding utf-8 -R b push | sed "s/$HGPORT1/PORT/"
21 hg -R a --encoding utf-8 log
22
23 kill `cat hg.pid`
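
The test serves repository a with a latin1 local encoding and talks to it from a utf-8 client, checking that the branch name æ survives clone, push and log on both sides. In isolation, the round trip it relies on is roughly this (a minimal sketch, assuming encoding.encoding may be reassigned for illustration):

# Minimal sketch of the round trip the test exercises (illustration only).
from mercurial import encoding

encoding.encoding = 'latin1'        # server's local encoding, as in the test
wire = encoding.fromlocal('\xe6')   # branch "æ" in latin1 -> UTF-8 for the wire

encoding.encoding = 'utf-8'         # client's local encoding
local = encoding.tolocal(wire)      # UTF-8 from the wire -> client encoding
assert local == '\xc3\xa6'          # still "æ", now as UTF-8
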
@@ -0,0 +1,36 b''
1 marked working directory as branch æ
2 adding foo
3 listening at http://localhost/ (bound to 127.0.0.1)
4 requesting all changes
5 adding changesets
6 adding manifests
7 adding file changes
8 added 1 changesets with 1 changes to 1 files
9 updating working directory
10 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
11 changeset: 0:867c11ce77b8
12 branch:      æ
13 tag: tip
14 user: test
15 date: Thu Jan 01 00:00:00 1970 +0000
16 summary: foo
17
18 pushing to http://localhost:PORT
19 searching for changes
20 adding changesets
21 adding manifests
22 adding file changes
23 added 1 changesets with 1 changes to 1 files
24 changeset: 1:58e7c90d67cb
25 branch:      æ
26 tag: tip
27 user: test
28 date: Thu Jan 01 00:00:00 1970 +0000
29 summary: bar
30
31 changeset: 0:867c11ce77b8
32 branch:      æ
33 user: test
34 date: Thu Jan 01 00:00:00 1970 +0000
35 summary: foo
36
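
The localrepo change below keeps the branch-head cache in UTF-8 (_ubranchcache) and serves it over the wire unchanged: the old branchmap(), which returned names in the local charset, becomes lbranchmap(), a new branchmap() returns the UTF-8 mapping, and branchtags() is switched to lbranchmap(). A toy, self-contained model of that split (hypothetical class, not Mercurial code):

# Toy model of the refactoring below -- hypothetical, for orientation only.
def tolocal(utf8name, localenc='latin-1'):
    # stand-in for mercurial.encoding.tolocal
    return utf8name.decode('utf-8').encode(localenc, 'replace')

class repomodel(object):
    def __init__(self, ubranchcache):
        self._ubranchcache = ubranchcache   # UTF-8 branch name -> list of heads
        self.branchcache = {}               # local-encoding branch name -> heads

    def branchmap(self):
        # wire-facing: handed to peers unchanged, always UTF-8
        return self._ubranchcache

    def lbranchmap(self):
        # UI-facing: keys re-encoded to the local charset on demand
        self.branchcache.clear()
        for k, v in self.branchmap().iteritems():
            self.branchcache[tolocal(k)] = v
        return self.branchcache

repo = repomodel({'\xc3\xa6': ['1' * 40]})  # branch "æ", one fake head id
print repo.lbranchmap().keys()              # ['\xe6'] -- latin-1 for local display
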
@@ -1,2171 +1,2179 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 import tags as tags_
16 import tags as tags_
17 from lock import release
17 from lock import release
18 import weakref, stat, errno, os, time, inspect
18 import weakref, stat, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 supported = set('revlogv1 store fncache shared'.split())
23 supported = set('revlogv1 store fncache shared'.split())
24
24
25 def __init__(self, baseui, path=None, create=0):
25 def __init__(self, baseui, path=None, create=0):
26 repo.repository.__init__(self)
26 repo.repository.__init__(self)
27 self.root = os.path.realpath(path)
27 self.root = os.path.realpath(path)
28 self.path = os.path.join(self.root, ".hg")
28 self.path = os.path.join(self.root, ".hg")
29 self.origroot = path
29 self.origroot = path
30 self.opener = util.opener(self.path)
30 self.opener = util.opener(self.path)
31 self.wopener = util.opener(self.root)
31 self.wopener = util.opener(self.root)
32 self.baseui = baseui
32 self.baseui = baseui
33 self.ui = baseui.copy()
33 self.ui = baseui.copy()
34
34
35 try:
35 try:
36 self.ui.readconfig(self.join("hgrc"), self.root)
36 self.ui.readconfig(self.join("hgrc"), self.root)
37 extensions.loadall(self.ui)
37 extensions.loadall(self.ui)
38 except IOError:
38 except IOError:
39 pass
39 pass
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 requirements = ["revlogv1"]
46 requirements = ["revlogv1"]
47 if self.ui.configbool('format', 'usestore', True):
47 if self.ui.configbool('format', 'usestore', True):
48 os.mkdir(os.path.join(self.path, "store"))
48 os.mkdir(os.path.join(self.path, "store"))
49 requirements.append("store")
49 requirements.append("store")
50 if self.ui.configbool('format', 'usefncache', True):
50 if self.ui.configbool('format', 'usefncache', True):
51 requirements.append("fncache")
51 requirements.append("fncache")
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 reqfile = self.opener("requires", "w")
57 reqfile = self.opener("requires", "w")
58 for r in requirements:
58 for r in requirements:
59 reqfile.write("%s\n" % r)
59 reqfile.write("%s\n" % r)
60 reqfile.close()
60 reqfile.close()
61 else:
61 else:
62 raise error.RepoError(_("repository %s not found") % path)
62 raise error.RepoError(_("repository %s not found") % path)
63 elif create:
63 elif create:
64 raise error.RepoError(_("repository %s already exists") % path)
64 raise error.RepoError(_("repository %s already exists") % path)
65 else:
65 else:
66 # find requirements
66 # find requirements
67 requirements = set()
67 requirements = set()
68 try:
68 try:
69 requirements = set(self.opener("requires").read().splitlines())
69 requirements = set(self.opener("requires").read().splitlines())
70 except IOError, inst:
70 except IOError, inst:
71 if inst.errno != errno.ENOENT:
71 if inst.errno != errno.ENOENT:
72 raise
72 raise
73 for r in requirements - self.supported:
73 for r in requirements - self.supported:
74 raise error.RepoError(_("requirement '%s' not supported") % r)
74 raise error.RepoError(_("requirement '%s' not supported") % r)
75
75
76 self.sharedpath = self.path
76 self.sharedpath = self.path
77 try:
77 try:
78 s = os.path.realpath(self.opener("sharedpath").read())
78 s = os.path.realpath(self.opener("sharedpath").read())
79 if not os.path.exists(s):
79 if not os.path.exists(s):
80 raise error.RepoError(
80 raise error.RepoError(
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 self.sharedpath = s
82 self.sharedpath = s
83 except IOError, inst:
83 except IOError, inst:
84 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
85 raise
85 raise
86
86
87 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.spath = self.store.path
88 self.spath = self.store.path
89 self.sopener = self.store.opener
89 self.sopener = self.store.opener
90 self.sjoin = self.store.join
90 self.sjoin = self.store.join
91 self.opener.createmode = self.store.createmode
91 self.opener.createmode = self.store.createmode
92
92
93 # These two define the set of tags for this repository. _tags
93 # These two define the set of tags for this repository. _tags
94 # maps tag name to node; _tagtypes maps tag name to 'global' or
94 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 # 'local'. (Global tags are defined by .hgtags across all
95 # 'local'. (Global tags are defined by .hgtags across all
96 # heads, and local tags are defined in .hg/localtags.) They
96 # heads, and local tags are defined in .hg/localtags.) They
97 # constitute the in-memory cache of tags.
97 # constitute the in-memory cache of tags.
98 self._tags = None
98 self._tags = None
99 self._tagtypes = None
99 self._tagtypes = None
100
100
101 self.branchcache = None
101 self.branchcache = None
102 self._ubranchcache = None # UTF-8 version of branchcache
102 self._ubranchcache = None # UTF-8 version of branchcache
103 self._branchcachetip = None
103 self._branchcachetip = None
104 self.nodetagscache = None
104 self.nodetagscache = None
105 self.filterpats = {}
105 self.filterpats = {}
106 self._datafilters = {}
106 self._datafilters = {}
107 self._transref = self._lockref = self._wlockref = None
107 self._transref = self._lockref = self._wlockref = None
108
108
109 @propertycache
109 @propertycache
110 def changelog(self):
110 def changelog(self):
111 c = changelog.changelog(self.sopener)
111 c = changelog.changelog(self.sopener)
112 if 'HG_PENDING' in os.environ:
112 if 'HG_PENDING' in os.environ:
113 p = os.environ['HG_PENDING']
113 p = os.environ['HG_PENDING']
114 if p.startswith(self.root):
114 if p.startswith(self.root):
115 c.readpending('00changelog.i.a')
115 c.readpending('00changelog.i.a')
116 self.sopener.defversion = c.version
116 self.sopener.defversion = c.version
117 return c
117 return c
118
118
119 @propertycache
119 @propertycache
120 def manifest(self):
120 def manifest(self):
121 return manifest.manifest(self.sopener)
121 return manifest.manifest(self.sopener)
122
122
123 @propertycache
123 @propertycache
124 def dirstate(self):
124 def dirstate(self):
125 return dirstate.dirstate(self.opener, self.ui, self.root)
125 return dirstate.dirstate(self.opener, self.ui, self.root)
126
126
127 def __getitem__(self, changeid):
127 def __getitem__(self, changeid):
128 if changeid is None:
128 if changeid is None:
129 return context.workingctx(self)
129 return context.workingctx(self)
130 return context.changectx(self, changeid)
130 return context.changectx(self, changeid)
131
131
132 def __nonzero__(self):
132 def __nonzero__(self):
133 return True
133 return True
134
134
135 def __len__(self):
135 def __len__(self):
136 return len(self.changelog)
136 return len(self.changelog)
137
137
138 def __iter__(self):
138 def __iter__(self):
139 for i in xrange(len(self)):
139 for i in xrange(len(self)):
140 yield i
140 yield i
141
141
142 def url(self):
142 def url(self):
143 return 'file:' + self.root
143 return 'file:' + self.root
144
144
145 def hook(self, name, throw=False, **args):
145 def hook(self, name, throw=False, **args):
146 return hook.hook(self.ui, self, name, throw, **args)
146 return hook.hook(self.ui, self, name, throw, **args)
147
147
148 tag_disallowed = ':\r\n'
148 tag_disallowed = ':\r\n'
149
149
150 def _tag(self, names, node, message, local, user, date, extra={}):
150 def _tag(self, names, node, message, local, user, date, extra={}):
151 if isinstance(names, str):
151 if isinstance(names, str):
152 allchars = names
152 allchars = names
153 names = (names,)
153 names = (names,)
154 else:
154 else:
155 allchars = ''.join(names)
155 allchars = ''.join(names)
156 for c in self.tag_disallowed:
156 for c in self.tag_disallowed:
157 if c in allchars:
157 if c in allchars:
158 raise util.Abort(_('%r cannot be used in a tag name') % c)
158 raise util.Abort(_('%r cannot be used in a tag name') % c)
159
159
160 for name in names:
160 for name in names:
161 self.hook('pretag', throw=True, node=hex(node), tag=name,
161 self.hook('pretag', throw=True, node=hex(node), tag=name,
162 local=local)
162 local=local)
163
163
164 def writetags(fp, names, munge, prevtags):
164 def writetags(fp, names, munge, prevtags):
165 fp.seek(0, 2)
165 fp.seek(0, 2)
166 if prevtags and prevtags[-1] != '\n':
166 if prevtags and prevtags[-1] != '\n':
167 fp.write('\n')
167 fp.write('\n')
168 for name in names:
168 for name in names:
169 m = munge and munge(name) or name
169 m = munge and munge(name) or name
170 if self._tagtypes and name in self._tagtypes:
170 if self._tagtypes and name in self._tagtypes:
171 old = self._tags.get(name, nullid)
171 old = self._tags.get(name, nullid)
172 fp.write('%s %s\n' % (hex(old), m))
172 fp.write('%s %s\n' % (hex(old), m))
173 fp.write('%s %s\n' % (hex(node), m))
173 fp.write('%s %s\n' % (hex(node), m))
174 fp.close()
174 fp.close()
175
175
176 prevtags = ''
176 prevtags = ''
177 if local:
177 if local:
178 try:
178 try:
179 fp = self.opener('localtags', 'r+')
179 fp = self.opener('localtags', 'r+')
180 except IOError:
180 except IOError:
181 fp = self.opener('localtags', 'a')
181 fp = self.opener('localtags', 'a')
182 else:
182 else:
183 prevtags = fp.read()
183 prevtags = fp.read()
184
184
185 # local tags are stored in the current charset
185 # local tags are stored in the current charset
186 writetags(fp, names, None, prevtags)
186 writetags(fp, names, None, prevtags)
187 for name in names:
187 for name in names:
188 self.hook('tag', node=hex(node), tag=name, local=local)
188 self.hook('tag', node=hex(node), tag=name, local=local)
189 return
189 return
190
190
191 try:
191 try:
192 fp = self.wfile('.hgtags', 'rb+')
192 fp = self.wfile('.hgtags', 'rb+')
193 except IOError:
193 except IOError:
194 fp = self.wfile('.hgtags', 'ab')
194 fp = self.wfile('.hgtags', 'ab')
195 else:
195 else:
196 prevtags = fp.read()
196 prevtags = fp.read()
197
197
198 # committed tags are stored in UTF-8
198 # committed tags are stored in UTF-8
199 writetags(fp, names, encoding.fromlocal, prevtags)
199 writetags(fp, names, encoding.fromlocal, prevtags)
200
200
201 if '.hgtags' not in self.dirstate:
201 if '.hgtags' not in self.dirstate:
202 self.add(['.hgtags'])
202 self.add(['.hgtags'])
203
203
204 m = match_.exact(self.root, '', ['.hgtags'])
204 m = match_.exact(self.root, '', ['.hgtags'])
205 tagnode = self.commit(message, user, date, extra=extra, match=m)
205 tagnode = self.commit(message, user, date, extra=extra, match=m)
206
206
207 for name in names:
207 for name in names:
208 self.hook('tag', node=hex(node), tag=name, local=local)
208 self.hook('tag', node=hex(node), tag=name, local=local)
209
209
210 return tagnode
210 return tagnode
211
211
212 def tag(self, names, node, message, local, user, date):
212 def tag(self, names, node, message, local, user, date):
213 '''tag a revision with one or more symbolic names.
213 '''tag a revision with one or more symbolic names.
214
214
215 names is a list of strings or, when adding a single tag, names may be a
215 names is a list of strings or, when adding a single tag, names may be a
216 string.
216 string.
217
217
218 if local is True, the tags are stored in a per-repository file.
218 if local is True, the tags are stored in a per-repository file.
219 otherwise, they are stored in the .hgtags file, and a new
219 otherwise, they are stored in the .hgtags file, and a new
220 changeset is committed with the change.
220 changeset is committed with the change.
221
221
222 keyword arguments:
222 keyword arguments:
223
223
224 local: whether to store tags in non-version-controlled file
224 local: whether to store tags in non-version-controlled file
225 (default False)
225 (default False)
226
226
227 message: commit message to use if committing
227 message: commit message to use if committing
228
228
229 user: name of user to use if committing
229 user: name of user to use if committing
230
230
231 date: date tuple to use if committing'''
231 date: date tuple to use if committing'''
232
232
233 for x in self.status()[:5]:
233 for x in self.status()[:5]:
234 if '.hgtags' in x:
234 if '.hgtags' in x:
235 raise util.Abort(_('working copy of .hgtags is changed '
235 raise util.Abort(_('working copy of .hgtags is changed '
236 '(please commit .hgtags manually)'))
236 '(please commit .hgtags manually)'))
237
237
238 self.tags() # instantiate the cache
238 self.tags() # instantiate the cache
239 self._tag(names, node, message, local, user, date)
239 self._tag(names, node, message, local, user, date)
240
240
241 def tags(self):
241 def tags(self):
242 '''return a mapping of tag to node'''
242 '''return a mapping of tag to node'''
243 if self._tags is None:
243 if self._tags is None:
244 (self._tags, self._tagtypes) = self._findtags()
244 (self._tags, self._tagtypes) = self._findtags()
245
245
246 return self._tags
246 return self._tags
247
247
248 def _findtags(self):
248 def _findtags(self):
249 '''Do the hard work of finding tags. Return a pair of dicts
249 '''Do the hard work of finding tags. Return a pair of dicts
250 (tags, tagtypes) where tags maps tag name to node, and tagtypes
250 (tags, tagtypes) where tags maps tag name to node, and tagtypes
251 maps tag name to a string like \'global\' or \'local\'.
251 maps tag name to a string like \'global\' or \'local\'.
252 Subclasses or extensions are free to add their own tags, but
252 Subclasses or extensions are free to add their own tags, but
253 should be aware that the returned dicts will be retained for the
253 should be aware that the returned dicts will be retained for the
254 duration of the localrepo object.'''
254 duration of the localrepo object.'''
255
255
256 # XXX what tagtype should subclasses/extensions use? Currently
256 # XXX what tagtype should subclasses/extensions use? Currently
257 # mq and bookmarks add tags, but do not set the tagtype at all.
257 # mq and bookmarks add tags, but do not set the tagtype at all.
258 # Should each extension invent its own tag type? Should there
258 # Should each extension invent its own tag type? Should there
259 # be one tagtype for all such "virtual" tags? Or is the status
259 # be one tagtype for all such "virtual" tags? Or is the status
260 # quo fine?
260 # quo fine?
261
261
262 alltags = {} # map tag name to (node, hist)
262 alltags = {} # map tag name to (node, hist)
263 tagtypes = {}
263 tagtypes = {}
264
264
265 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
265 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
266 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
266 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
267
267
268 # Build the return dicts. Have to re-encode tag names because
268 # Build the return dicts. Have to re-encode tag names because
269 # the tags module always uses UTF-8 (in order not to lose info
269 # the tags module always uses UTF-8 (in order not to lose info
270 # writing to the cache), but the rest of Mercurial wants them in
270 # writing to the cache), but the rest of Mercurial wants them in
271 # local encoding.
271 # local encoding.
272 tags = {}
272 tags = {}
273 for (name, (node, hist)) in alltags.iteritems():
273 for (name, (node, hist)) in alltags.iteritems():
274 if node != nullid:
274 if node != nullid:
275 tags[encoding.tolocal(name)] = node
275 tags[encoding.tolocal(name)] = node
276 tags['tip'] = self.changelog.tip()
276 tags['tip'] = self.changelog.tip()
277 tagtypes = dict([(encoding.tolocal(name), value)
277 tagtypes = dict([(encoding.tolocal(name), value)
278 for (name, value) in tagtypes.iteritems()])
278 for (name, value) in tagtypes.iteritems()])
279 return (tags, tagtypes)
279 return (tags, tagtypes)
280
280
281 def tagtype(self, tagname):
281 def tagtype(self, tagname):
282 '''
282 '''
283 return the type of the given tag. result can be:
283 return the type of the given tag. result can be:
284
284
285 'local' : a local tag
285 'local' : a local tag
286 'global' : a global tag
286 'global' : a global tag
287 None : tag does not exist
287 None : tag does not exist
288 '''
288 '''
289
289
290 self.tags()
290 self.tags()
291
291
292 return self._tagtypes.get(tagname)
292 return self._tagtypes.get(tagname)
293
293
294 def tagslist(self):
294 def tagslist(self):
295 '''return a list of tags ordered by revision'''
295 '''return a list of tags ordered by revision'''
296 l = []
296 l = []
297 for t, n in self.tags().iteritems():
297 for t, n in self.tags().iteritems():
298 try:
298 try:
299 r = self.changelog.rev(n)
299 r = self.changelog.rev(n)
300 except:
300 except:
301 r = -2 # sort to the beginning of the list if unknown
301 r = -2 # sort to the beginning of the list if unknown
302 l.append((r, t, n))
302 l.append((r, t, n))
303 return [(t, n) for r, t, n in sorted(l)]
303 return [(t, n) for r, t, n in sorted(l)]
304
304
305 def nodetags(self, node):
305 def nodetags(self, node):
306 '''return the tags associated with a node'''
306 '''return the tags associated with a node'''
307 if not self.nodetagscache:
307 if not self.nodetagscache:
308 self.nodetagscache = {}
308 self.nodetagscache = {}
309 for t, n in self.tags().iteritems():
309 for t, n in self.tags().iteritems():
310 self.nodetagscache.setdefault(n, []).append(t)
310 self.nodetagscache.setdefault(n, []).append(t)
311 return self.nodetagscache.get(node, [])
311 return self.nodetagscache.get(node, [])
312
312
313 def _branchtags(self, partial, lrev):
313 def _branchtags(self, partial, lrev):
314 # TODO: rename this function?
314 # TODO: rename this function?
315 tiprev = len(self) - 1
315 tiprev = len(self) - 1
316 if lrev != tiprev:
316 if lrev != tiprev:
317 self._updatebranchcache(partial, lrev+1, tiprev+1)
317 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319
319
320 return partial
320 return partial
321
321
322 def branchmap(self):
322 def lbranchmap(self):
323 tip = self.changelog.tip()
323 tip = self.changelog.tip()
324 if self.branchcache is not None and self._branchcachetip == tip:
324 if self.branchcache is not None and self._branchcachetip == tip:
325 return self.branchcache
325 return self.branchcache
326
326
327 partial = self.branchmap()
328
329 # the branch cache is stored on disk as UTF-8, but in the local
330 # charset internally
331 for k, v in partial.iteritems():
332 self.branchcache[encoding.tolocal(k)] = v
333 return self.branchcache
334
335 def branchmap(self):
336 tip = self.changelog.tip()
337 if self._ubranchcache is not None and self._branchcachetip == tip:
338 return self._ubranchcache
339
327 oldtip = self._branchcachetip
340 oldtip = self._branchcachetip
328 self._branchcachetip = tip
341 self._branchcachetip = tip
329 if self.branchcache is None:
342 if self.branchcache is None:
330 self.branchcache = {} # avoid recursion in changectx
343 self.branchcache = {} # avoid recursion in changectx
331 else:
344 else:
332 self.branchcache.clear() # keep using the same dict
345 self.branchcache.clear() # keep using the same dict
333 if oldtip is None or oldtip not in self.changelog.nodemap:
346 if oldtip is None or oldtip not in self.changelog.nodemap:
334 partial, last, lrev = self._readbranchcache()
347 partial, last, lrev = self._readbranchcache()
335 else:
348 else:
336 lrev = self.changelog.rev(oldtip)
349 lrev = self.changelog.rev(oldtip)
337 partial = self._ubranchcache
350 partial = self._ubranchcache
338
351
339 self._branchtags(partial, lrev)
352 self._branchtags(partial, lrev)
340 # this private cache holds all heads (not just tips)
353 # this private cache holds all heads (not just tips)
341 self._ubranchcache = partial
354 self._ubranchcache = partial
342
355
343 # the branch cache is stored on disk as UTF-8, but in the local
356 return self._ubranchcache
344 # charset internally
345 for k, v in partial.iteritems():
346 self.branchcache[encoding.tolocal(k)] = v
347 return self.branchcache
348
349
357
350 def branchtags(self):
358 def branchtags(self):
351 '''return a dict where branch names map to the tipmost head of
359 '''return a dict where branch names map to the tipmost head of
352 the branch, open heads come before closed'''
360 the branch, open heads come before closed'''
353 bt = {}
361 bt = {}
354 for bn, heads in self.branchmap().iteritems():
362 for bn, heads in self.lbranchmap().iteritems():
355 head = None
363 head = None
356 for i in range(len(heads)-1, -1, -1):
364 for i in range(len(heads)-1, -1, -1):
357 h = heads[i]
365 h = heads[i]
358 if 'close' not in self.changelog.read(h)[5]:
366 if 'close' not in self.changelog.read(h)[5]:
359 head = h
367 head = h
360 break
368 break
361 # no open heads were found
369 # no open heads were found
362 if head is None:
370 if head is None:
363 head = heads[-1]
371 head = heads[-1]
364 bt[bn] = head
372 bt[bn] = head
365 return bt
373 return bt
366
374
367
375
368 def _readbranchcache(self):
376 def _readbranchcache(self):
369 partial = {}
377 partial = {}
370 try:
378 try:
371 f = self.opener("branchheads.cache")
379 f = self.opener("branchheads.cache")
372 lines = f.read().split('\n')
380 lines = f.read().split('\n')
373 f.close()
381 f.close()
374 except (IOError, OSError):
382 except (IOError, OSError):
375 return {}, nullid, nullrev
383 return {}, nullid, nullrev
376
384
377 try:
385 try:
378 last, lrev = lines.pop(0).split(" ", 1)
386 last, lrev = lines.pop(0).split(" ", 1)
379 last, lrev = bin(last), int(lrev)
387 last, lrev = bin(last), int(lrev)
380 if lrev >= len(self) or self[lrev].node() != last:
388 if lrev >= len(self) or self[lrev].node() != last:
381 # invalidate the cache
389 # invalidate the cache
382 raise ValueError('invalidating branch cache (tip differs)')
390 raise ValueError('invalidating branch cache (tip differs)')
383 for l in lines:
391 for l in lines:
384 if not l: continue
392 if not l: continue
385 node, label = l.split(" ", 1)
393 node, label = l.split(" ", 1)
386 partial.setdefault(label.strip(), []).append(bin(node))
394 partial.setdefault(label.strip(), []).append(bin(node))
387 except KeyboardInterrupt:
395 except KeyboardInterrupt:
388 raise
396 raise
389 except Exception, inst:
397 except Exception, inst:
390 if self.ui.debugflag:
398 if self.ui.debugflag:
391 self.ui.warn(str(inst), '\n')
399 self.ui.warn(str(inst), '\n')
392 partial, last, lrev = {}, nullid, nullrev
400 partial, last, lrev = {}, nullid, nullrev
393 return partial, last, lrev
401 return partial, last, lrev
394
402
395 def _writebranchcache(self, branches, tip, tiprev):
403 def _writebranchcache(self, branches, tip, tiprev):
396 try:
404 try:
397 f = self.opener("branchheads.cache", "w", atomictemp=True)
405 f = self.opener("branchheads.cache", "w", atomictemp=True)
398 f.write("%s %s\n" % (hex(tip), tiprev))
406 f.write("%s %s\n" % (hex(tip), tiprev))
399 for label, nodes in branches.iteritems():
407 for label, nodes in branches.iteritems():
400 for node in nodes:
408 for node in nodes:
401 f.write("%s %s\n" % (hex(node), label))
409 f.write("%s %s\n" % (hex(node), label))
402 f.rename()
410 f.rename()
403 except (IOError, OSError):
411 except (IOError, OSError):
404 pass
412 pass
405
413
406 def _updatebranchcache(self, partial, start, end):
414 def _updatebranchcache(self, partial, start, end):
407 # collect new branch entries
415 # collect new branch entries
408 newbranches = {}
416 newbranches = {}
409 for r in xrange(start, end):
417 for r in xrange(start, end):
410 c = self[r]
418 c = self[r]
411 newbranches.setdefault(c.branch(), []).append(c.node())
419 newbranches.setdefault(c.branch(), []).append(c.node())
412 # if older branchheads are reachable from new ones, they aren't
420 # if older branchheads are reachable from new ones, they aren't
413 # really branchheads. Note checking parents is insufficient:
421 # really branchheads. Note checking parents is insufficient:
414 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
422 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
415 for branch, newnodes in newbranches.iteritems():
423 for branch, newnodes in newbranches.iteritems():
416 bheads = partial.setdefault(branch, [])
424 bheads = partial.setdefault(branch, [])
417 bheads.extend(newnodes)
425 bheads.extend(newnodes)
418 if len(bheads) < 2:
426 if len(bheads) < 2:
419 continue
427 continue
420 newbheads = []
428 newbheads = []
421 # starting from tip means fewer passes over reachable
429 # starting from tip means fewer passes over reachable
422 while newnodes:
430 while newnodes:
423 latest = newnodes.pop()
431 latest = newnodes.pop()
424 if latest not in bheads:
432 if latest not in bheads:
425 continue
433 continue
426 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
434 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
427 reachable = self.changelog.reachable(latest, minbhrev)
435 reachable = self.changelog.reachable(latest, minbhrev)
428 bheads = [b for b in bheads if b not in reachable]
436 bheads = [b for b in bheads if b not in reachable]
429 newbheads.insert(0, latest)
437 newbheads.insert(0, latest)
430 bheads.extend(newbheads)
438 bheads.extend(newbheads)
431 partial[branch] = bheads
439 partial[branch] = bheads
432
440
433 def lookup(self, key):
441 def lookup(self, key):
434 if isinstance(key, int):
442 if isinstance(key, int):
435 return self.changelog.node(key)
443 return self.changelog.node(key)
436 elif key == '.':
444 elif key == '.':
437 return self.dirstate.parents()[0]
445 return self.dirstate.parents()[0]
438 elif key == 'null':
446 elif key == 'null':
439 return nullid
447 return nullid
440 elif key == 'tip':
448 elif key == 'tip':
441 return self.changelog.tip()
449 return self.changelog.tip()
442 n = self.changelog._match(key)
450 n = self.changelog._match(key)
443 if n:
451 if n:
444 return n
452 return n
445 if key in self.tags():
453 if key in self.tags():
446 return self.tags()[key]
454 return self.tags()[key]
447 if key in self.branchtags():
455 if key in self.branchtags():
448 return self.branchtags()[key]
456 return self.branchtags()[key]
449 n = self.changelog._partialmatch(key)
457 n = self.changelog._partialmatch(key)
450 if n:
458 if n:
451 return n
459 return n
452
460
453 # can't find key, check if it might have come from damaged dirstate
461 # can't find key, check if it might have come from damaged dirstate
454 if key in self.dirstate.parents():
462 if key in self.dirstate.parents():
455 raise error.Abort(_("working directory has unknown parent '%s'!")
463 raise error.Abort(_("working directory has unknown parent '%s'!")
456 % short(key))
464 % short(key))
457 try:
465 try:
458 if len(key) == 20:
466 if len(key) == 20:
459 key = hex(key)
467 key = hex(key)
460 except:
468 except:
461 pass
469 pass
462 raise error.RepoLookupError(_("unknown revision '%s'") % key)
470 raise error.RepoLookupError(_("unknown revision '%s'") % key)
463
471
464 def local(self):
472 def local(self):
465 return True
473 return True
466
474
467 def join(self, f):
475 def join(self, f):
468 return os.path.join(self.path, f)
476 return os.path.join(self.path, f)
469
477
470 def wjoin(self, f):
478 def wjoin(self, f):
471 return os.path.join(self.root, f)
479 return os.path.join(self.root, f)
472
480
473 def rjoin(self, f):
481 def rjoin(self, f):
474 return os.path.join(self.root, util.pconvert(f))
482 return os.path.join(self.root, util.pconvert(f))
475
483
476 def file(self, f):
484 def file(self, f):
477 if f[0] == '/':
485 if f[0] == '/':
478 f = f[1:]
486 f = f[1:]
479 return filelog.filelog(self.sopener, f)
487 return filelog.filelog(self.sopener, f)
480
488
481 def changectx(self, changeid):
489 def changectx(self, changeid):
482 return self[changeid]
490 return self[changeid]
483
491
484 def parents(self, changeid=None):
492 def parents(self, changeid=None):
485 '''get list of changectxs for parents of changeid'''
493 '''get list of changectxs for parents of changeid'''
486 return self[changeid].parents()
494 return self[changeid].parents()
487
495
488 def filectx(self, path, changeid=None, fileid=None):
496 def filectx(self, path, changeid=None, fileid=None):
489 """changeid can be a changeset revision, node, or tag.
497 """changeid can be a changeset revision, node, or tag.
490 fileid can be a file revision or node."""
498 fileid can be a file revision or node."""
491 return context.filectx(self, path, changeid, fileid)
499 return context.filectx(self, path, changeid, fileid)
492
500
493 def getcwd(self):
501 def getcwd(self):
494 return self.dirstate.getcwd()
502 return self.dirstate.getcwd()
495
503
496 def pathto(self, f, cwd=None):
504 def pathto(self, f, cwd=None):
497 return self.dirstate.pathto(f, cwd)
505 return self.dirstate.pathto(f, cwd)
498
506
499 def wfile(self, f, mode='r'):
507 def wfile(self, f, mode='r'):
500 return self.wopener(f, mode)
508 return self.wopener(f, mode)
501
509
502 def _link(self, f):
510 def _link(self, f):
503 return os.path.islink(self.wjoin(f))
511 return os.path.islink(self.wjoin(f))
504
512
505 def _filter(self, filter, filename, data):
513 def _filter(self, filter, filename, data):
506 if filter not in self.filterpats:
514 if filter not in self.filterpats:
507 l = []
515 l = []
508 for pat, cmd in self.ui.configitems(filter):
516 for pat, cmd in self.ui.configitems(filter):
509 if cmd == '!':
517 if cmd == '!':
510 continue
518 continue
511 mf = match_.match(self.root, '', [pat])
519 mf = match_.match(self.root, '', [pat])
512 fn = None
520 fn = None
513 params = cmd
521 params = cmd
514 for name, filterfn in self._datafilters.iteritems():
522 for name, filterfn in self._datafilters.iteritems():
515 if cmd.startswith(name):
523 if cmd.startswith(name):
516 fn = filterfn
524 fn = filterfn
517 params = cmd[len(name):].lstrip()
525 params = cmd[len(name):].lstrip()
518 break
526 break
519 if not fn:
527 if not fn:
520 fn = lambda s, c, **kwargs: util.filter(s, c)
528 fn = lambda s, c, **kwargs: util.filter(s, c)
521 # Wrap old filters not supporting keyword arguments
529 # Wrap old filters not supporting keyword arguments
522 if not inspect.getargspec(fn)[2]:
530 if not inspect.getargspec(fn)[2]:
523 oldfn = fn
531 oldfn = fn
524 fn = lambda s, c, **kwargs: oldfn(s, c)
532 fn = lambda s, c, **kwargs: oldfn(s, c)
525 l.append((mf, fn, params))
533 l.append((mf, fn, params))
526 self.filterpats[filter] = l
534 self.filterpats[filter] = l
527
535
528 for mf, fn, cmd in self.filterpats[filter]:
536 for mf, fn, cmd in self.filterpats[filter]:
529 if mf(filename):
537 if mf(filename):
530 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
538 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
531 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
539 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
532 break
540 break
533
541
534 return data
542 return data
535
543
536 def adddatafilter(self, name, filter):
544 def adddatafilter(self, name, filter):
537 self._datafilters[name] = filter
545 self._datafilters[name] = filter
538
546
539 def wread(self, filename):
547 def wread(self, filename):
540 if self._link(filename):
548 if self._link(filename):
541 data = os.readlink(self.wjoin(filename))
549 data = os.readlink(self.wjoin(filename))
542 else:
550 else:
543 data = self.wopener(filename, 'r').read()
551 data = self.wopener(filename, 'r').read()
544 return self._filter("encode", filename, data)
552 return self._filter("encode", filename, data)
545
553
546 def wwrite(self, filename, data, flags):
554 def wwrite(self, filename, data, flags):
547 data = self._filter("decode", filename, data)
555 data = self._filter("decode", filename, data)
548 try:
556 try:
549 os.unlink(self.wjoin(filename))
557 os.unlink(self.wjoin(filename))
550 except OSError:
558 except OSError:
551 pass
559 pass
552 if 'l' in flags:
560 if 'l' in flags:
553 self.wopener.symlink(data, filename)
561 self.wopener.symlink(data, filename)
554 else:
562 else:
555 self.wopener(filename, 'w').write(data)
563 self.wopener(filename, 'w').write(data)
556 if 'x' in flags:
564 if 'x' in flags:
557 util.set_flags(self.wjoin(filename), False, True)
565 util.set_flags(self.wjoin(filename), False, True)
558
566
559 def wwritedata(self, filename, data):
567 def wwritedata(self, filename, data):
560 return self._filter("decode", filename, data)
568 return self._filter("decode", filename, data)
561
569
562 def transaction(self):
570 def transaction(self):
563 tr = self._transref and self._transref() or None
571 tr = self._transref and self._transref() or None
564 if tr and tr.running():
572 if tr and tr.running():
565 return tr.nest()
573 return tr.nest()
566
574
567 # abort here if the journal already exists
575 # abort here if the journal already exists
568 if os.path.exists(self.sjoin("journal")):
576 if os.path.exists(self.sjoin("journal")):
569 raise error.RepoError(_("journal already exists - run hg recover"))
577 raise error.RepoError(_("journal already exists - run hg recover"))
570
578
571 # save dirstate for rollback
579 # save dirstate for rollback
572 try:
580 try:
573 ds = self.opener("dirstate").read()
581 ds = self.opener("dirstate").read()
574 except IOError:
582 except IOError:
575 ds = ""
583 ds = ""
576 self.opener("journal.dirstate", "w").write(ds)
584 self.opener("journal.dirstate", "w").write(ds)
577 self.opener("journal.branch", "w").write(self.dirstate.branch())
585 self.opener("journal.branch", "w").write(self.dirstate.branch())
578
586
579 renames = [(self.sjoin("journal"), self.sjoin("undo")),
587 renames = [(self.sjoin("journal"), self.sjoin("undo")),
580 (self.join("journal.dirstate"), self.join("undo.dirstate")),
588 (self.join("journal.dirstate"), self.join("undo.dirstate")),
581 (self.join("journal.branch"), self.join("undo.branch"))]
589 (self.join("journal.branch"), self.join("undo.branch"))]
582 tr = transaction.transaction(self.ui.warn, self.sopener,
590 tr = transaction.transaction(self.ui.warn, self.sopener,
583 self.sjoin("journal"),
591 self.sjoin("journal"),
584 aftertrans(renames),
592 aftertrans(renames),
585 self.store.createmode)
593 self.store.createmode)
586 self._transref = weakref.ref(tr)
594 self._transref = weakref.ref(tr)
587 return tr
595 return tr
588
596
589 def recover(self):
597 def recover(self):
590 lock = self.lock()
598 lock = self.lock()
591 try:
599 try:
592 if os.path.exists(self.sjoin("journal")):
600 if os.path.exists(self.sjoin("journal")):
593 self.ui.status(_("rolling back interrupted transaction\n"))
601 self.ui.status(_("rolling back interrupted transaction\n"))
594 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
602 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
595 self.invalidate()
603 self.invalidate()
596 return True
604 return True
597 else:
605 else:
598 self.ui.warn(_("no interrupted transaction available\n"))
606 self.ui.warn(_("no interrupted transaction available\n"))
599 return False
607 return False
600 finally:
608 finally:
601 lock.release()
609 lock.release()
602
610
603 def rollback(self):
611 def rollback(self):
604 wlock = lock = None
612 wlock = lock = None
605 try:
613 try:
606 wlock = self.wlock()
614 wlock = self.wlock()
607 lock = self.lock()
615 lock = self.lock()
608 if os.path.exists(self.sjoin("undo")):
616 if os.path.exists(self.sjoin("undo")):
609 self.ui.status(_("rolling back last transaction\n"))
617 self.ui.status(_("rolling back last transaction\n"))
610 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
618 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
611 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
619 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
612 try:
620 try:
613 branch = self.opener("undo.branch").read()
621 branch = self.opener("undo.branch").read()
614 self.dirstate.setbranch(branch)
622 self.dirstate.setbranch(branch)
615 except IOError:
623 except IOError:
616 self.ui.warn(_("Named branch could not be reset, "
624 self.ui.warn(_("Named branch could not be reset, "
617 "current branch still is: %s\n")
625 "current branch still is: %s\n")
618 % encoding.tolocal(self.dirstate.branch()))
626 % encoding.tolocal(self.dirstate.branch()))
619 self.invalidate()
627 self.invalidate()
620 self.dirstate.invalidate()
628 self.dirstate.invalidate()
621 self.destroyed()
629 self.destroyed()
622 else:
630 else:
623 self.ui.warn(_("no rollback information available\n"))
631 self.ui.warn(_("no rollback information available\n"))
624 finally:
632 finally:
625 release(lock, wlock)
633 release(lock, wlock)
626
634
627 def invalidate(self):
635 def invalidate(self):
628 for a in "changelog manifest".split():
636 for a in "changelog manifest".split():
629 if a in self.__dict__:
637 if a in self.__dict__:
630 delattr(self, a)
638 delattr(self, a)
631 self._tags = None
639 self._tags = None
632 self._tagtypes = None
640 self._tagtypes = None
633 self.nodetagscache = None
641 self.nodetagscache = None
634 self.branchcache = None
642 self.branchcache = None
635 self._ubranchcache = None
643 self._ubranchcache = None
636 self._branchcachetip = None
644 self._branchcachetip = None
637
645
638 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
646 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
639 try:
647 try:
640 l = lock.lock(lockname, 0, releasefn, desc=desc)
648 l = lock.lock(lockname, 0, releasefn, desc=desc)
641 except error.LockHeld, inst:
649 except error.LockHeld, inst:
642 if not wait:
650 if not wait:
643 raise
651 raise
644 self.ui.warn(_("waiting for lock on %s held by %r\n") %
652 self.ui.warn(_("waiting for lock on %s held by %r\n") %
645 (desc, inst.locker))
653 (desc, inst.locker))
646 # default to 600 seconds timeout
654 # default to 600 seconds timeout
647 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
655 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
648 releasefn, desc=desc)
656 releasefn, desc=desc)
649 if acquirefn:
657 if acquirefn:
650 acquirefn()
658 acquirefn()
651 return l
659 return l
652
660
653 def lock(self, wait=True):
661 def lock(self, wait=True):
654 '''Lock the repository store (.hg/store) and return a weak reference
662 '''Lock the repository store (.hg/store) and return a weak reference
655 to the lock. Use this before modifying the store (e.g. committing or
663 to the lock. Use this before modifying the store (e.g. committing or
656 stripping). If you are opening a transaction, get a lock as well.)'''
664 stripping). If you are opening a transaction, get a lock as well.)'''
657 l = self._lockref and self._lockref()
665 l = self._lockref and self._lockref()
658 if l is not None and l.held:
666 if l is not None and l.held:
659 l.lock()
667 l.lock()
660 return l
668 return l
661
669
662 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
670 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
663 _('repository %s') % self.origroot)
671 _('repository %s') % self.origroot)
664 self._lockref = weakref.ref(l)
672 self._lockref = weakref.ref(l)
665 return l
673 return l
666
674
667 def wlock(self, wait=True):
675 def wlock(self, wait=True):
668 '''Lock the non-store parts of the repository (everything under
676 '''Lock the non-store parts of the repository (everything under
669 .hg except .hg/store) and return a weak reference to the lock.
677 .hg except .hg/store) and return a weak reference to the lock.
670 Use this before modifying files in .hg.'''
678 Use this before modifying files in .hg.'''
671 l = self._wlockref and self._wlockref()
679 l = self._wlockref and self._wlockref()
672 if l is not None and l.held:
680 if l is not None and l.held:
673 l.lock()
681 l.lock()
674 return l
682 return l
675
683
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
684 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 self.dirstate.invalidate, _('working directory of %s') %
685 self.dirstate.invalidate, _('working directory of %s') %
678 self.origroot)
686 self.origroot)
679 self._wlockref = weakref.ref(l)
687 self._wlockref = weakref.ref(l)
680 return l
688 return l
681
689
682 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
690 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
683 """
691 """
684 commit an individual file as part of a larger transaction
692 commit an individual file as part of a larger transaction
685 """
693 """
686
694
687 fname = fctx.path()
695 fname = fctx.path()
688 text = fctx.data()
696 text = fctx.data()
689 flog = self.file(fname)
697 flog = self.file(fname)
690 fparent1 = manifest1.get(fname, nullid)
698 fparent1 = manifest1.get(fname, nullid)
691 fparent2 = fparent2o = manifest2.get(fname, nullid)
699 fparent2 = fparent2o = manifest2.get(fname, nullid)
692
700
693 meta = {}
701 meta = {}
694 copy = fctx.renamed()
702 copy = fctx.renamed()
695 if copy and copy[0] != fname:
703 if copy and copy[0] != fname:
696 # Mark the new revision of this file as a copy of another
704 # Mark the new revision of this file as a copy of another
697 # file. This copy data will effectively act as a parent
705 # file. This copy data will effectively act as a parent
698 # of this new revision. If this is a merge, the first
706 # of this new revision. If this is a merge, the first
699 # parent will be the nullid (meaning "look up the copy data")
707 # parent will be the nullid (meaning "look up the copy data")
700 # and the second one will be the other parent. For example:
708 # and the second one will be the other parent. For example:
701 #
709 #
702 # 0 --- 1 --- 3 rev1 changes file foo
710 # 0 --- 1 --- 3 rev1 changes file foo
703 # \ / rev2 renames foo to bar and changes it
711 # \ / rev2 renames foo to bar and changes it
704 # \- 2 -/ rev3 should have bar with all changes and
712 # \- 2 -/ rev3 should have bar with all changes and
705 # should record that bar descends from
713 # should record that bar descends from
706 # bar in rev2 and foo in rev1
714 # bar in rev2 and foo in rev1
707 #
715 #
708 # this allows this merge to succeed:
716 # this allows this merge to succeed:
709 #
717 #
710 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
718 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
711 # \ / merging rev3 and rev4 should use bar@rev2
719 # \ / merging rev3 and rev4 should use bar@rev2
712 # \- 2 --- 4 as the merge base
720 # \- 2 --- 4 as the merge base
713 #
721 #
714
722
715 cfname = copy[0]
723 cfname = copy[0]
716 crev = manifest1.get(cfname)
724 crev = manifest1.get(cfname)
717 newfparent = fparent2
725 newfparent = fparent2
718
726
719 if manifest2: # branch merge
727 if manifest2: # branch merge
720 if fparent2 == nullid or crev is None: # copied on remote side
728 if fparent2 == nullid or crev is None: # copied on remote side
721 if cfname in manifest2:
729 if cfname in manifest2:
722 crev = manifest2[cfname]
730 crev = manifest2[cfname]
723 newfparent = fparent1
731 newfparent = fparent1
724
732
725 # find source in nearest ancestor if we've lost track
733 # find source in nearest ancestor if we've lost track
726 if not crev:
734 if not crev:
727 self.ui.debug(" %s: searching for copy revision for %s\n" %
735 self.ui.debug(" %s: searching for copy revision for %s\n" %
728 (fname, cfname))
736 (fname, cfname))
729 for ancestor in self['.'].ancestors():
737 for ancestor in self['.'].ancestors():
730 if cfname in ancestor:
738 if cfname in ancestor:
731 crev = ancestor[cfname].filenode()
739 crev = ancestor[cfname].filenode()
732 break
740 break
733
741
734 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
742 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
735 meta["copy"] = cfname
743 meta["copy"] = cfname
736 meta["copyrev"] = hex(crev)
744 meta["copyrev"] = hex(crev)
737 fparent1, fparent2 = nullid, newfparent
745 fparent1, fparent2 = nullid, newfparent
738 elif fparent2 != nullid:
746 elif fparent2 != nullid:
739 # is one parent an ancestor of the other?
747 # is one parent an ancestor of the other?
740 fparentancestor = flog.ancestor(fparent1, fparent2)
748 fparentancestor = flog.ancestor(fparent1, fparent2)
741 if fparentancestor == fparent1:
749 if fparentancestor == fparent1:
742 fparent1, fparent2 = fparent2, nullid
750 fparent1, fparent2 = fparent2, nullid
743 elif fparentancestor == fparent2:
751 elif fparentancestor == fparent2:
744 fparent2 = nullid
752 fparent2 = nullid
745
753
746 # is the file changed?
754 # is the file changed?
747 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
755 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
748 changelist.append(fname)
756 changelist.append(fname)
749 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
757 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
750
758
751 # are just the flags changed during merge?
759 # are just the flags changed during merge?
752 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
760 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
753 changelist.append(fname)
761 changelist.append(fname)
754
762
755 return fparent1
763 return fparent1
756
764
757 def commit(self, text="", user=None, date=None, match=None, force=False,
765 def commit(self, text="", user=None, date=None, match=None, force=False,
758 editor=False, extra={}):
766 editor=False, extra={}):
759 """Add a new revision to current repository.
767 """Add a new revision to current repository.
760
768
761 Revision information is gathered from the working directory,
769 Revision information is gathered from the working directory,
762 match can be used to filter the committed files. If editor is
770 match can be used to filter the committed files. If editor is
763 supplied, it is called to get a commit message.
771 supplied, it is called to get a commit message.
764 """
772 """
765
773
766 def fail(f, msg):
774 def fail(f, msg):
767 raise util.Abort('%s: %s' % (f, msg))
775 raise util.Abort('%s: %s' % (f, msg))
768
776
769 if not match:
777 if not match:
770 match = match_.always(self.root, '')
778 match = match_.always(self.root, '')
771
779
772 if not force:
780 if not force:
773 vdirs = []
781 vdirs = []
774 match.dir = vdirs.append
782 match.dir = vdirs.append
775 match.bad = fail
783 match.bad = fail
776
784
777 wlock = self.wlock()
785 wlock = self.wlock()
778 try:
786 try:
779 p1, p2 = self.dirstate.parents()
787 p1, p2 = self.dirstate.parents()
780 wctx = self[None]
788 wctx = self[None]
781
789
782 if (not force and p2 != nullid and match and
790 if (not force and p2 != nullid and match and
783 (match.files() or match.anypats())):
791 (match.files() or match.anypats())):
784 raise util.Abort(_('cannot partially commit a merge '
792 raise util.Abort(_('cannot partially commit a merge '
785 '(do not specify files or patterns)'))
793 '(do not specify files or patterns)'))
786
794
787 changes = self.status(match=match, clean=force)
795 changes = self.status(match=match, clean=force)
788 if force:
796 if force:
789 changes[0].extend(changes[6]) # mq may commit unchanged files
797 changes[0].extend(changes[6]) # mq may commit unchanged files
790
798
791 # check subrepos
799 # check subrepos
792 subs = []
800 subs = []
793 for s in wctx.substate:
801 for s in wctx.substate:
794 if match(s) and wctx.sub(s).dirty():
802 if match(s) and wctx.sub(s).dirty():
795 subs.append(s)
803 subs.append(s)
796 if subs and '.hgsubstate' not in changes[0]:
804 if subs and '.hgsubstate' not in changes[0]:
797 changes[0].insert(0, '.hgsubstate')
805 changes[0].insert(0, '.hgsubstate')
798
806
799 # make sure all explicit patterns are matched
807 # make sure all explicit patterns are matched
800 if not force and match.files():
808 if not force and match.files():
801 matched = set(changes[0] + changes[1] + changes[2])
809 matched = set(changes[0] + changes[1] + changes[2])
802
810
803 for f in match.files():
811 for f in match.files():
804 if f == '.' or f in matched or f in wctx.substate:
812 if f == '.' or f in matched or f in wctx.substate:
805 continue
813 continue
806 if f in changes[3]: # missing
814 if f in changes[3]: # missing
807 fail(f, _('file not found!'))
815 fail(f, _('file not found!'))
808 if f in vdirs: # visited directory
816 if f in vdirs: # visited directory
809 d = f + '/'
817 d = f + '/'
810 for mf in matched:
818 for mf in matched:
811 if mf.startswith(d):
819 if mf.startswith(d):
812 break
820 break
813 else:
821 else:
814 fail(f, _("no match under directory!"))
822 fail(f, _("no match under directory!"))
815 elif f not in self.dirstate:
823 elif f not in self.dirstate:
816 fail(f, _("file not tracked!"))
824 fail(f, _("file not tracked!"))
817
825
818 if (not force and not extra.get("close") and p2 == nullid
826 if (not force and not extra.get("close") and p2 == nullid
819 and not (changes[0] or changes[1] or changes[2])
827 and not (changes[0] or changes[1] or changes[2])
820 and self[None].branch() == self['.'].branch()):
828 and self[None].branch() == self['.'].branch()):
821 return None
829 return None
822
830
823 ms = merge_.mergestate(self)
831 ms = merge_.mergestate(self)
824 for f in changes[0]:
832 for f in changes[0]:
825 if f in ms and ms[f] == 'u':
833 if f in ms and ms[f] == 'u':
826 raise util.Abort(_("unresolved merge conflicts "
834 raise util.Abort(_("unresolved merge conflicts "
827 "(see hg resolve)"))
835 "(see hg resolve)"))
828
836
829 cctx = context.workingctx(self, (p1, p2), text, user, date,
837 cctx = context.workingctx(self, (p1, p2), text, user, date,
830 extra, changes)
838 extra, changes)
831 if editor:
839 if editor:
832 cctx._text = editor(self, cctx, subs)
840 cctx._text = editor(self, cctx, subs)
833
841
834 # commit subs
842 # commit subs
835 if subs:
843 if subs:
836 state = wctx.substate.copy()
844 state = wctx.substate.copy()
837 for s in subs:
845 for s in subs:
838 self.ui.status(_('committing subrepository %s\n') % s)
846 self.ui.status(_('committing subrepository %s\n') % s)
839 sr = wctx.sub(s).commit(cctx._text, user, date)
847 sr = wctx.sub(s).commit(cctx._text, user, date)
840 state[s] = (state[s][0], sr)
848 state[s] = (state[s][0], sr)
841 subrepo.writestate(self, state)
849 subrepo.writestate(self, state)
842
850
843 ret = self.commitctx(cctx, True)
851 ret = self.commitctx(cctx, True)
844
852
845 # update dirstate and mergestate
853 # update dirstate and mergestate
846 for f in changes[0] + changes[1]:
854 for f in changes[0] + changes[1]:
847 self.dirstate.normal(f)
855 self.dirstate.normal(f)
848 for f in changes[2]:
856 for f in changes[2]:
849 self.dirstate.forget(f)
857 self.dirstate.forget(f)
850 self.dirstate.setparents(ret)
858 self.dirstate.setparents(ret)
851 ms.reset()
859 ms.reset()
852
860
853 return ret
861 return ret
854
862
855 finally:
863 finally:
856 wlock.release()
864 wlock.release()
857
865
858 def commitctx(self, ctx, error=False):
866 def commitctx(self, ctx, error=False):
859 """Add a new revision to current repository.
867 """Add a new revision to current repository.
860
868
861 Revision information is passed via the context argument.
869 Revision information is passed via the context argument.
862 """
870 """
863
871
864 tr = lock = None
872 tr = lock = None
865 removed = ctx.removed()
873 removed = ctx.removed()
866 p1, p2 = ctx.p1(), ctx.p2()
874 p1, p2 = ctx.p1(), ctx.p2()
867 m1 = p1.manifest().copy()
875 m1 = p1.manifest().copy()
868 m2 = p2.manifest()
876 m2 = p2.manifest()
869 user = ctx.user()
877 user = ctx.user()
870
878
871 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
879 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
872 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
880 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
873
881
874 lock = self.lock()
882 lock = self.lock()
875 try:
883 try:
876 tr = self.transaction()
884 tr = self.transaction()
877 trp = weakref.proxy(tr)
885 trp = weakref.proxy(tr)
878
886
879 # check in files
887 # check in files
880 new = {}
888 new = {}
881 changed = []
889 changed = []
882 linkrev = len(self)
890 linkrev = len(self)
883 for f in sorted(ctx.modified() + ctx.added()):
891 for f in sorted(ctx.modified() + ctx.added()):
884 self.ui.note(f + "\n")
892 self.ui.note(f + "\n")
885 try:
893 try:
886 fctx = ctx[f]
894 fctx = ctx[f]
887 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
895 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
888 changed)
896 changed)
889 m1.set(f, fctx.flags())
897 m1.set(f, fctx.flags())
890 except (OSError, IOError):
898 except (OSError, IOError):
891 if error:
899 if error:
892 self.ui.warn(_("trouble committing %s!\n") % f)
900 self.ui.warn(_("trouble committing %s!\n") % f)
893 raise
901 raise
894 else:
902 else:
895 removed.append(f)
903 removed.append(f)
896
904
897 # update manifest
905 # update manifest
898 m1.update(new)
906 m1.update(new)
899 removed = [f for f in sorted(removed) if f in m1 or f in m2]
907 removed = [f for f in sorted(removed) if f in m1 or f in m2]
900 drop = [f for f in removed if f in m1]
908 drop = [f for f in removed if f in m1]
901 for f in drop:
909 for f in drop:
902 del m1[f]
910 del m1[f]
903 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
911 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
904 p2.manifestnode(), (new, drop))
912 p2.manifestnode(), (new, drop))
905
913
906 # update changelog
914 # update changelog
907 self.changelog.delayupdate()
915 self.changelog.delayupdate()
908 n = self.changelog.add(mn, changed + removed, ctx.description(),
916 n = self.changelog.add(mn, changed + removed, ctx.description(),
909 trp, p1.node(), p2.node(),
917 trp, p1.node(), p2.node(),
910 user, ctx.date(), ctx.extra().copy())
918 user, ctx.date(), ctx.extra().copy())
911 p = lambda: self.changelog.writepending() and self.root or ""
919 p = lambda: self.changelog.writepending() and self.root or ""
912 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
920 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
913 parent2=xp2, pending=p)
921 parent2=xp2, pending=p)
914 self.changelog.finalize(trp)
922 self.changelog.finalize(trp)
915 tr.close()
923 tr.close()
916
924
917 if self.branchcache:
925 if self.branchcache:
918 self.branchtags()
926 self.branchtags()
919
927
920 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
928 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
921 return n
929 return n
922 finally:
930 finally:
923 del tr
931 del tr
924 lock.release()
932 lock.release()
925
933
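# A minimal, standalone sketch of the removed/drop bookkeeping that
# commitctx performs above when it updates the copied manifest m1: a file
# only counts as removed if one of the parent manifests still tracks it,
# and only the entries actually present in m1 need to be deleted from the
# copy.  The toy manifests below are plain dicts, not real manifest objects.
def split_removed(removed, m1, m2):
    removed = [f for f in sorted(removed) if f in m1 or f in m2]
    drop = [f for f in removed if f in m1]
    return removed, drop

m1 = {'a': 'node1', 'b': 'node2'}
m2 = {'b': 'node2', 'c': 'node3'}
print(split_removed(['b', 'c', 'never-tracked'], m1, m2))
# -> (['b', 'c'], ['b'])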
926 def destroyed(self):
934 def destroyed(self):
927 '''Inform the repository that nodes have been destroyed.
935 '''Inform the repository that nodes have been destroyed.
928 Intended for use by strip and rollback, so there's a common
936 Intended for use by strip and rollback, so there's a common
929 place for anything that has to be done after destroying history.'''
937 place for anything that has to be done after destroying history.'''
930 # XXX it might be nice if we could take the list of destroyed
938 # XXX it might be nice if we could take the list of destroyed
931 # nodes, but I don't see an easy way for rollback() to do that
939 # nodes, but I don't see an easy way for rollback() to do that
932
940
933 # Ensure the persistent tag cache is updated. Doing it now
941 # Ensure the persistent tag cache is updated. Doing it now
934 # means that the tag cache only has to worry about destroyed
942 # means that the tag cache only has to worry about destroyed
935 # heads immediately after a strip/rollback. That in turn
943 # heads immediately after a strip/rollback. That in turn
936 # guarantees that "cachetip == currenttip" (comparing both rev
944 # guarantees that "cachetip == currenttip" (comparing both rev
937 # and node) always means no nodes have been added or destroyed.
945 # and node) always means no nodes have been added or destroyed.
938
946
939 # XXX this is suboptimal when qrefresh'ing: we strip the current
947 # XXX this is suboptimal when qrefresh'ing: we strip the current
940 # head, refresh the tag cache, then immediately add a new head.
948 # head, refresh the tag cache, then immediately add a new head.
941 # But I think doing it this way is necessary for the "instant
949 # But I think doing it this way is necessary for the "instant
942 # tag cache retrieval" case to work.
950 # tag cache retrieval" case to work.
943 tags_.findglobaltags(self.ui, self, {}, {})
951 tags_.findglobaltags(self.ui, self, {}, {})
944
952
945 def walk(self, match, node=None):
953 def walk(self, match, node=None):
946 '''
954 '''
947 walk recursively through the directory tree or a given
955 walk recursively through the directory tree or a given
948 changeset, finding all files matched by the match
956 changeset, finding all files matched by the match
949 function
957 function
950 '''
958 '''
951 return self[node].walk(match)
959 return self[node].walk(match)
952
960
953 def status(self, node1='.', node2=None, match=None,
961 def status(self, node1='.', node2=None, match=None,
954 ignored=False, clean=False, unknown=False):
962 ignored=False, clean=False, unknown=False):
955 """return status of files between two nodes or node and working directory
963 """return status of files between two nodes or node and working directory
956
964
957 If node1 is None, use the first dirstate parent instead.
965 If node1 is None, use the first dirstate parent instead.
958 If node2 is None, compare node1 with working directory.
966 If node2 is None, compare node1 with working directory.
959 """
967 """
960
968
961 def mfmatches(ctx):
969 def mfmatches(ctx):
962 mf = ctx.manifest().copy()
970 mf = ctx.manifest().copy()
963 for fn in mf.keys():
971 for fn in mf.keys():
964 if not match(fn):
972 if not match(fn):
965 del mf[fn]
973 del mf[fn]
966 return mf
974 return mf
967
975
968 if isinstance(node1, context.changectx):
976 if isinstance(node1, context.changectx):
969 ctx1 = node1
977 ctx1 = node1
970 else:
978 else:
971 ctx1 = self[node1]
979 ctx1 = self[node1]
972 if isinstance(node2, context.changectx):
980 if isinstance(node2, context.changectx):
973 ctx2 = node2
981 ctx2 = node2
974 else:
982 else:
975 ctx2 = self[node2]
983 ctx2 = self[node2]
976
984
977 working = ctx2.rev() is None
985 working = ctx2.rev() is None
978 parentworking = working and ctx1 == self['.']
986 parentworking = working and ctx1 == self['.']
979 match = match or match_.always(self.root, self.getcwd())
987 match = match or match_.always(self.root, self.getcwd())
980 listignored, listclean, listunknown = ignored, clean, unknown
988 listignored, listclean, listunknown = ignored, clean, unknown
981
989
982 # load earliest manifest first for caching reasons
990 # load earliest manifest first for caching reasons
983 if not working and ctx2.rev() < ctx1.rev():
991 if not working and ctx2.rev() < ctx1.rev():
984 ctx2.manifest()
992 ctx2.manifest()
985
993
986 if not parentworking:
994 if not parentworking:
987 def bad(f, msg):
995 def bad(f, msg):
988 if f not in ctx1:
996 if f not in ctx1:
989 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
997 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
990 match.bad = bad
998 match.bad = bad
991
999
992 if working: # we need to scan the working dir
1000 if working: # we need to scan the working dir
993 s = self.dirstate.status(match, listignored, listclean, listunknown)
1001 s = self.dirstate.status(match, listignored, listclean, listunknown)
994 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1002 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
995
1003
996 # check for any possibly clean files
1004 # check for any possibly clean files
997 if parentworking and cmp:
1005 if parentworking and cmp:
998 fixup = []
1006 fixup = []
999 # do a full compare of any files that might have changed
1007 # do a full compare of any files that might have changed
1000 for f in sorted(cmp):
1008 for f in sorted(cmp):
1001 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1009 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1002 or ctx1[f].cmp(ctx2[f].data())):
1010 or ctx1[f].cmp(ctx2[f].data())):
1003 modified.append(f)
1011 modified.append(f)
1004 else:
1012 else:
1005 fixup.append(f)
1013 fixup.append(f)
1006
1014
1007 if listclean:
1015 if listclean:
1008 clean += fixup
1016 clean += fixup
1009
1017
1010 # update dirstate for files that are actually clean
1018 # update dirstate for files that are actually clean
1011 if fixup:
1019 if fixup:
1012 try:
1020 try:
1013 # updating the dirstate is optional
1021 # updating the dirstate is optional
1014 # so we don't wait on the lock
1022 # so we don't wait on the lock
1015 wlock = self.wlock(False)
1023 wlock = self.wlock(False)
1016 try:
1024 try:
1017 for f in fixup:
1025 for f in fixup:
1018 self.dirstate.normal(f)
1026 self.dirstate.normal(f)
1019 finally:
1027 finally:
1020 wlock.release()
1028 wlock.release()
1021 except error.LockError:
1029 except error.LockError:
1022 pass
1030 pass
1023
1031
1024 if not parentworking:
1032 if not parentworking:
1025 mf1 = mfmatches(ctx1)
1033 mf1 = mfmatches(ctx1)
1026 if working:
1034 if working:
1027 # we are comparing working dir against non-parent
1035 # we are comparing working dir against non-parent
1028 # generate a pseudo-manifest for the working dir
1036 # generate a pseudo-manifest for the working dir
1029 mf2 = mfmatches(self['.'])
1037 mf2 = mfmatches(self['.'])
1030 for f in cmp + modified + added:
1038 for f in cmp + modified + added:
1031 mf2[f] = None
1039 mf2[f] = None
1032 mf2.set(f, ctx2.flags(f))
1040 mf2.set(f, ctx2.flags(f))
1033 for f in removed:
1041 for f in removed:
1034 if f in mf2:
1042 if f in mf2:
1035 del mf2[f]
1043 del mf2[f]
1036 else:
1044 else:
1037 # we are comparing two revisions
1045 # we are comparing two revisions
1038 deleted, unknown, ignored = [], [], []
1046 deleted, unknown, ignored = [], [], []
1039 mf2 = mfmatches(ctx2)
1047 mf2 = mfmatches(ctx2)
1040
1048
1041 modified, added, clean = [], [], []
1049 modified, added, clean = [], [], []
1042 for fn in mf2:
1050 for fn in mf2:
1043 if fn in mf1:
1051 if fn in mf1:
1044 if (mf1.flags(fn) != mf2.flags(fn) or
1052 if (mf1.flags(fn) != mf2.flags(fn) or
1045 (mf1[fn] != mf2[fn] and
1053 (mf1[fn] != mf2[fn] and
1046 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1054 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1047 modified.append(fn)
1055 modified.append(fn)
1048 elif listclean:
1056 elif listclean:
1049 clean.append(fn)
1057 clean.append(fn)
1050 del mf1[fn]
1058 del mf1[fn]
1051 else:
1059 else:
1052 added.append(fn)
1060 added.append(fn)
1053 removed = mf1.keys()
1061 removed = mf1.keys()
1054
1062
1055 r = modified, added, removed, deleted, unknown, ignored, clean
1063 r = modified, added, removed, deleted, unknown, ignored, clean
1056 [l.sort() for l in r]
1064 [l.sort() for l in r]
1057 return r
1065 return r
1058
1066
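# A hedged usage sketch for status() above: it returns a 7-tuple of sorted
# lists (modified, added, removed, deleted, unknown, ignored, clean).  The
# helper name and output format here are hypothetical, and repo is assumed
# to be a localrepository instance.
def print_status(repo, node1='.', node2=None):
    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(node1, node2, clean=True, unknown=True)
    for flag, files in (('M', modified), ('A', added), ('R', removed),
                        ('!', deleted), ('?', unknown), ('C', clean)):
        for f in files:
            print("%s %s" % (flag, f))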
1059 def add(self, list):
1067 def add(self, list):
1060 wlock = self.wlock()
1068 wlock = self.wlock()
1061 try:
1069 try:
1062 rejected = []
1070 rejected = []
1063 for f in list:
1071 for f in list:
1064 p = self.wjoin(f)
1072 p = self.wjoin(f)
1065 try:
1073 try:
1066 st = os.lstat(p)
1074 st = os.lstat(p)
1067 except:
1075 except:
1068 self.ui.warn(_("%s does not exist!\n") % f)
1076 self.ui.warn(_("%s does not exist!\n") % f)
1069 rejected.append(f)
1077 rejected.append(f)
1070 continue
1078 continue
1071 if st.st_size > 10000000:
1079 if st.st_size > 10000000:
1072 self.ui.warn(_("%s: files over 10MB may cause memory and"
1080 self.ui.warn(_("%s: files over 10MB may cause memory and"
1073 " performance problems\n"
1081 " performance problems\n"
1074 "(use 'hg revert %s' to unadd the file)\n")
1082 "(use 'hg revert %s' to unadd the file)\n")
1075 % (f, f))
1083 % (f, f))
1076 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1084 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1077 self.ui.warn(_("%s not added: only files and symlinks "
1085 self.ui.warn(_("%s not added: only files and symlinks "
1078 "supported currently\n") % f)
1086 "supported currently\n") % f)
1079 rejected.append(p)
1087 rejected.append(p)
1080 elif self.dirstate[f] in 'amn':
1088 elif self.dirstate[f] in 'amn':
1081 self.ui.warn(_("%s already tracked!\n") % f)
1089 self.ui.warn(_("%s already tracked!\n") % f)
1082 elif self.dirstate[f] == 'r':
1090 elif self.dirstate[f] == 'r':
1083 self.dirstate.normallookup(f)
1091 self.dirstate.normallookup(f)
1084 else:
1092 else:
1085 self.dirstate.add(f)
1093 self.dirstate.add(f)
1086 return rejected
1094 return rejected
1087 finally:
1095 finally:
1088 wlock.release()
1096 wlock.release()
1089
1097
1090 def forget(self, list):
1098 def forget(self, list):
1091 wlock = self.wlock()
1099 wlock = self.wlock()
1092 try:
1100 try:
1093 for f in list:
1101 for f in list:
1094 if self.dirstate[f] != 'a':
1102 if self.dirstate[f] != 'a':
1095 self.ui.warn(_("%s not added!\n") % f)
1103 self.ui.warn(_("%s not added!\n") % f)
1096 else:
1104 else:
1097 self.dirstate.forget(f)
1105 self.dirstate.forget(f)
1098 finally:
1106 finally:
1099 wlock.release()
1107 wlock.release()
1100
1108
1101 def remove(self, list, unlink=False):
1109 def remove(self, list, unlink=False):
1102 if unlink:
1110 if unlink:
1103 for f in list:
1111 for f in list:
1104 try:
1112 try:
1105 util.unlink(self.wjoin(f))
1113 util.unlink(self.wjoin(f))
1106 except OSError, inst:
1114 except OSError, inst:
1107 if inst.errno != errno.ENOENT:
1115 if inst.errno != errno.ENOENT:
1108 raise
1116 raise
1109 wlock = self.wlock()
1117 wlock = self.wlock()
1110 try:
1118 try:
1111 for f in list:
1119 for f in list:
1112 if unlink and os.path.exists(self.wjoin(f)):
1120 if unlink and os.path.exists(self.wjoin(f)):
1113 self.ui.warn(_("%s still exists!\n") % f)
1121 self.ui.warn(_("%s still exists!\n") % f)
1114 elif self.dirstate[f] == 'a':
1122 elif self.dirstate[f] == 'a':
1115 self.dirstate.forget(f)
1123 self.dirstate.forget(f)
1116 elif f not in self.dirstate:
1124 elif f not in self.dirstate:
1117 self.ui.warn(_("%s not tracked!\n") % f)
1125 self.ui.warn(_("%s not tracked!\n") % f)
1118 else:
1126 else:
1119 self.dirstate.remove(f)
1127 self.dirstate.remove(f)
1120 finally:
1128 finally:
1121 wlock.release()
1129 wlock.release()
1122
1130
1123 def undelete(self, list):
1131 def undelete(self, list):
1124 manifests = [self.manifest.read(self.changelog.read(p)[0])
1132 manifests = [self.manifest.read(self.changelog.read(p)[0])
1125 for p in self.dirstate.parents() if p != nullid]
1133 for p in self.dirstate.parents() if p != nullid]
1126 wlock = self.wlock()
1134 wlock = self.wlock()
1127 try:
1135 try:
1128 for f in list:
1136 for f in list:
1129 if self.dirstate[f] != 'r':
1137 if self.dirstate[f] != 'r':
1130 self.ui.warn(_("%s not removed!\n") % f)
1138 self.ui.warn(_("%s not removed!\n") % f)
1131 else:
1139 else:
1132 m = f in manifests[0] and manifests[0] or manifests[1]
1140 m = f in manifests[0] and manifests[0] or manifests[1]
1133 t = self.file(f).read(m[f])
1141 t = self.file(f).read(m[f])
1134 self.wwrite(f, t, m.flags(f))
1142 self.wwrite(f, t, m.flags(f))
1135 self.dirstate.normal(f)
1143 self.dirstate.normal(f)
1136 finally:
1144 finally:
1137 wlock.release()
1145 wlock.release()
1138
1146
1139 def copy(self, source, dest):
1147 def copy(self, source, dest):
1140 p = self.wjoin(dest)
1148 p = self.wjoin(dest)
1141 if not (os.path.exists(p) or os.path.islink(p)):
1149 if not (os.path.exists(p) or os.path.islink(p)):
1142 self.ui.warn(_("%s does not exist!\n") % dest)
1150 self.ui.warn(_("%s does not exist!\n") % dest)
1143 elif not (os.path.isfile(p) or os.path.islink(p)):
1151 elif not (os.path.isfile(p) or os.path.islink(p)):
1144 self.ui.warn(_("copy failed: %s is not a file or a "
1152 self.ui.warn(_("copy failed: %s is not a file or a "
1145 "symbolic link\n") % dest)
1153 "symbolic link\n") % dest)
1146 else:
1154 else:
1147 wlock = self.wlock()
1155 wlock = self.wlock()
1148 try:
1156 try:
1149 if self.dirstate[dest] in '?r':
1157 if self.dirstate[dest] in '?r':
1150 self.dirstate.add(dest)
1158 self.dirstate.add(dest)
1151 self.dirstate.copy(source, dest)
1159 self.dirstate.copy(source, dest)
1152 finally:
1160 finally:
1153 wlock.release()
1161 wlock.release()
1154
1162
1155 def heads(self, start=None):
1163 def heads(self, start=None):
1156 heads = self.changelog.heads(start)
1164 heads = self.changelog.heads(start)
1157 # sort the output in rev descending order
1165 # sort the output in rev descending order
1158 heads = [(-self.changelog.rev(h), h) for h in heads]
1166 heads = [(-self.changelog.rev(h), h) for h in heads]
1159 return [n for (r, n) in sorted(heads)]
1167 return [n for (r, n) in sorted(heads)]
1160
1168
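# Standalone sketch of the ordering trick used by heads() above: pairing
# each head with its negated revision number lets a plain ascending sort
# produce the heads newest-first.  The node names and revision numbers
# below are made up.
revs = {'n1': 3, 'n2': 7, 'n3': 5}
heads = ['n1', 'n2', 'n3']
pairs = [(-revs[h], h) for h in heads]
print([n for (r, n) in sorted(pairs)])   # -> ['n2', 'n3', 'n1'] (revs 7, 5, 3)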
1161 def branchheads(self, branch=None, start=None, closed=False):
1169 def branchheads(self, branch=None, start=None, closed=False):
1162 '''return a (possibly filtered) list of heads for the given branch
1170 '''return a (possibly filtered) list of heads for the given branch
1163
1171
1164 Heads are returned in topological order, from newest to oldest.
1172 Heads are returned in topological order, from newest to oldest.
1165 If branch is None, use the dirstate branch.
1173 If branch is None, use the dirstate branch.
1166 If start is not None, return only heads reachable from start.
1174 If start is not None, return only heads reachable from start.
1167 If closed is True, return heads that are marked as closed as well.
1175 If closed is True, return heads that are marked as closed as well.
1168 '''
1176 '''
1169 if branch is None:
1177 if branch is None:
1170 branch = self[None].branch()
1178 branch = self[None].branch()
1171 branches = self.branchmap()
1179 branches = self.lbranchmap()
1172 if branch not in branches:
1180 if branch not in branches:
1173 return []
1181 return []
1174 # the cache returns heads ordered lowest to highest
1182 # the cache returns heads ordered lowest to highest
1175 bheads = list(reversed(branches[branch]))
1183 bheads = list(reversed(branches[branch]))
1176 if start is not None:
1184 if start is not None:
1177 # filter out the heads that cannot be reached from startrev
1185 # filter out the heads that cannot be reached from startrev
1178 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1186 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1179 bheads = [h for h in bheads if h in fbheads]
1187 bheads = [h for h in bheads if h in fbheads]
1180 if not closed:
1188 if not closed:
1181 bheads = [h for h in bheads if
1189 bheads = [h for h in bheads if
1182 ('close' not in self.changelog.read(h)[5])]
1190 ('close' not in self.changelog.read(h)[5])]
1183 return bheads
1191 return bheads
1184
1192
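# A simplified, standalone model of branchheads() above.  The branch cache
# stores heads ordered lowest to highest revision, so they are reversed to
# get newest-first, and heads whose changeset carries a 'close' marker in
# its extra dict are filtered out unless closed=True.  The cache and extra
# dicts below are toy stand-ins, not the real changelog.
def toy_branchheads(branchcache, extras, branch, closed=False):
    if branch not in branchcache:
        return []
    bheads = list(reversed(branchcache[branch]))
    if not closed:
        bheads = [h for h in bheads if 'close' not in extras.get(h, {})]
    return bheads

cache = {'default': ['h1', 'h2', 'h3']}          # oldest -> newest
extras = {'h2': {'close': '1'}}                  # h2 closed the branch
print(toy_branchheads(cache, extras, 'default'))              # -> ['h3', 'h1']
print(toy_branchheads(cache, extras, 'default', closed=True)) # -> ['h3', 'h2', 'h1']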
1185 def branches(self, nodes):
1193 def branches(self, nodes):
1186 if not nodes:
1194 if not nodes:
1187 nodes = [self.changelog.tip()]
1195 nodes = [self.changelog.tip()]
1188 b = []
1196 b = []
1189 for n in nodes:
1197 for n in nodes:
1190 t = n
1198 t = n
1191 while 1:
1199 while 1:
1192 p = self.changelog.parents(n)
1200 p = self.changelog.parents(n)
1193 if p[1] != nullid or p[0] == nullid:
1201 if p[1] != nullid or p[0] == nullid:
1194 b.append((t, n, p[0], p[1]))
1202 b.append((t, n, p[0], p[1]))
1195 break
1203 break
1196 n = p[0]
1204 n = p[0]
1197 return b
1205 return b
1198
1206
1199 def between(self, pairs):
1207 def between(self, pairs):
1200 r = []
1208 r = []
1201
1209
1202 for top, bottom in pairs:
1210 for top, bottom in pairs:
1203 n, l, i = top, [], 0
1211 n, l, i = top, [], 0
1204 f = 1
1212 f = 1
1205
1213
1206 while n != bottom and n != nullid:
1214 while n != bottom and n != nullid:
1207 p = self.changelog.parents(n)[0]
1215 p = self.changelog.parents(n)[0]
1208 if i == f:
1216 if i == f:
1209 l.append(n)
1217 l.append(n)
1210 f = f * 2
1218 f = f * 2
1211 n = p
1219 n = p
1212 i += 1
1220 i += 1
1213
1221
1214 r.append(l)
1222 r.append(l)
1215
1223
1216 return r
1224 return r
1217
1225
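# Standalone sketch of the sampling pattern implemented by between() above:
# walking first parents from top towards bottom, it records the nodes at
# distances 1, 2, 4, 8, ... from top, so the discovery code can narrow a
# linear segment with a logarithmic number of round trips.  The chain below
# is a toy list of labels ordered newest (top) to oldest (bottom).
def sample(chain):
    l, f = [], 1
    for i, node in enumerate(chain):
        if node == chain[-1]:        # reached bottom
            break
        if i == f:
            l.append(node)
            f *= 2
    return l

chain = ['c%d' % i for i in range(10)]   # c0 is top, c9 is bottom
print(sample(chain))                     # -> ['c1', 'c2', 'c4', 'c8']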
1218 def findincoming(self, remote, base=None, heads=None, force=False):
1226 def findincoming(self, remote, base=None, heads=None, force=False):
1219 """Return list of roots of the subsets of missing nodes from remote
1227 """Return list of roots of the subsets of missing nodes from remote
1220
1228
1221 If base dict is specified, assume that these nodes and their parents
1229 If base dict is specified, assume that these nodes and their parents
1222 exist on the remote side and that no child of a node of base exists
1230 exist on the remote side and that no child of a node of base exists
1223 in both remote and self.
1231 in both remote and self.
1224 Furthermore, base will be updated to include the nodes that exist
1232 Furthermore, base will be updated to include the nodes that exist
1225 in both self and remote but none of whose children exist in both.
1233 in both self and remote but none of whose children exist in both.
1226 If a list of heads is specified, return only nodes which are heads
1234 If a list of heads is specified, return only nodes which are heads
1227 or ancestors of these heads.
1235 or ancestors of these heads.
1228
1236
1229 All the ancestors of base are in self and in remote.
1237 All the ancestors of base are in self and in remote.
1230 All the descendants of the list returned are missing in self.
1238 All the descendants of the list returned are missing in self.
1231 (and so we know that the rest of the nodes are missing in remote, see
1239 (and so we know that the rest of the nodes are missing in remote, see
1232 outgoing)
1240 outgoing)
1233 """
1241 """
1234 return self.findcommonincoming(remote, base, heads, force)[1]
1242 return self.findcommonincoming(remote, base, heads, force)[1]
1235
1243
1236 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1244 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1237 """Return a tuple (common, missing roots, heads) used to identify
1245 """Return a tuple (common, missing roots, heads) used to identify
1238 missing nodes from remote.
1246 missing nodes from remote.
1239
1247
1240 If base dict is specified, assume that these nodes and their parents
1248 If base dict is specified, assume that these nodes and their parents
1241 exist on the remote side and that no child of a node of base exists
1249 exist on the remote side and that no child of a node of base exists
1242 in both remote and self.
1250 in both remote and self.
1243 Furthermore, base will be updated to include the nodes that exist
1251 Furthermore, base will be updated to include the nodes that exist
1244 in both self and remote but none of whose children exist in both.
1252 in both self and remote but none of whose children exist in both.
1245 If a list of heads is specified, return only nodes which are heads
1253 If a list of heads is specified, return only nodes which are heads
1246 or ancestors of these heads.
1254 or ancestors of these heads.
1247
1255
1248 All the ancestors of base are in self and in remote.
1256 All the ancestors of base are in self and in remote.
1249 """
1257 """
1250 m = self.changelog.nodemap
1258 m = self.changelog.nodemap
1251 search = []
1259 search = []
1252 fetch = set()
1260 fetch = set()
1253 seen = set()
1261 seen = set()
1254 seenbranch = set()
1262 seenbranch = set()
1255 if base is None:
1263 if base is None:
1256 base = {}
1264 base = {}
1257
1265
1258 if not heads:
1266 if not heads:
1259 heads = remote.heads()
1267 heads = remote.heads()
1260
1268
1261 if self.changelog.tip() == nullid:
1269 if self.changelog.tip() == nullid:
1262 base[nullid] = 1
1270 base[nullid] = 1
1263 if heads != [nullid]:
1271 if heads != [nullid]:
1264 return [nullid], [nullid], list(heads)
1272 return [nullid], [nullid], list(heads)
1265 return [nullid], [], []
1273 return [nullid], [], []
1266
1274
1267 # assume we're closer to the tip than the root
1275 # assume we're closer to the tip than the root
1268 # and start by examining the heads
1276 # and start by examining the heads
1269 self.ui.status(_("searching for changes\n"))
1277 self.ui.status(_("searching for changes\n"))
1270
1278
1271 unknown = []
1279 unknown = []
1272 for h in heads:
1280 for h in heads:
1273 if h not in m:
1281 if h not in m:
1274 unknown.append(h)
1282 unknown.append(h)
1275 else:
1283 else:
1276 base[h] = 1
1284 base[h] = 1
1277
1285
1278 heads = unknown
1286 heads = unknown
1279 if not unknown:
1287 if not unknown:
1280 return base.keys(), [], []
1288 return base.keys(), [], []
1281
1289
1282 req = set(unknown)
1290 req = set(unknown)
1283 reqcnt = 0
1291 reqcnt = 0
1284
1292
1285 # search through remote branches
1293 # search through remote branches
1286 # a 'branch' here is a linear segment of history, with four parts:
1294 # a 'branch' here is a linear segment of history, with four parts:
1287 # head, root, first parent, second parent
1295 # head, root, first parent, second parent
1288 # (a branch always has two parents (or none) by definition)
1296 # (a branch always has two parents (or none) by definition)
1289 unknown = remote.branches(unknown)
1297 unknown = remote.branches(unknown)
1290 while unknown:
1298 while unknown:
1291 r = []
1299 r = []
1292 while unknown:
1300 while unknown:
1293 n = unknown.pop(0)
1301 n = unknown.pop(0)
1294 if n[0] in seen:
1302 if n[0] in seen:
1295 continue
1303 continue
1296
1304
1297 self.ui.debug("examining %s:%s\n"
1305 self.ui.debug("examining %s:%s\n"
1298 % (short(n[0]), short(n[1])))
1306 % (short(n[0]), short(n[1])))
1299 if n[0] == nullid: # found the end of the branch
1307 if n[0] == nullid: # found the end of the branch
1300 pass
1308 pass
1301 elif n in seenbranch:
1309 elif n in seenbranch:
1302 self.ui.debug("branch already found\n")
1310 self.ui.debug("branch already found\n")
1303 continue
1311 continue
1304 elif n[1] and n[1] in m: # do we know the base?
1312 elif n[1] and n[1] in m: # do we know the base?
1305 self.ui.debug("found incomplete branch %s:%s\n"
1313 self.ui.debug("found incomplete branch %s:%s\n"
1306 % (short(n[0]), short(n[1])))
1314 % (short(n[0]), short(n[1])))
1307 search.append(n[0:2]) # schedule branch range for scanning
1315 search.append(n[0:2]) # schedule branch range for scanning
1308 seenbranch.add(n)
1316 seenbranch.add(n)
1309 else:
1317 else:
1310 if n[1] not in seen and n[1] not in fetch:
1318 if n[1] not in seen and n[1] not in fetch:
1311 if n[2] in m and n[3] in m:
1319 if n[2] in m and n[3] in m:
1312 self.ui.debug("found new changeset %s\n" %
1320 self.ui.debug("found new changeset %s\n" %
1313 short(n[1]))
1321 short(n[1]))
1314 fetch.add(n[1]) # earliest unknown
1322 fetch.add(n[1]) # earliest unknown
1315 for p in n[2:4]:
1323 for p in n[2:4]:
1316 if p in m:
1324 if p in m:
1317 base[p] = 1 # latest known
1325 base[p] = 1 # latest known
1318
1326
1319 for p in n[2:4]:
1327 for p in n[2:4]:
1320 if p not in req and p not in m:
1328 if p not in req and p not in m:
1321 r.append(p)
1329 r.append(p)
1322 req.add(p)
1330 req.add(p)
1323 seen.add(n[0])
1331 seen.add(n[0])
1324
1332
1325 if r:
1333 if r:
1326 reqcnt += 1
1334 reqcnt += 1
1327 self.ui.debug("request %d: %s\n" %
1335 self.ui.debug("request %d: %s\n" %
1328 (reqcnt, " ".join(map(short, r))))
1336 (reqcnt, " ".join(map(short, r))))
1329 for p in xrange(0, len(r), 10):
1337 for p in xrange(0, len(r), 10):
1330 for b in remote.branches(r[p:p+10]):
1338 for b in remote.branches(r[p:p+10]):
1331 self.ui.debug("received %s:%s\n" %
1339 self.ui.debug("received %s:%s\n" %
1332 (short(b[0]), short(b[1])))
1340 (short(b[0]), short(b[1])))
1333 unknown.append(b)
1341 unknown.append(b)
1334
1342
1335 # do binary search on the branches we found
1343 # do binary search on the branches we found
1336 while search:
1344 while search:
1337 newsearch = []
1345 newsearch = []
1338 reqcnt += 1
1346 reqcnt += 1
1339 for n, l in zip(search, remote.between(search)):
1347 for n, l in zip(search, remote.between(search)):
1340 l.append(n[1])
1348 l.append(n[1])
1341 p = n[0]
1349 p = n[0]
1342 f = 1
1350 f = 1
1343 for i in l:
1351 for i in l:
1344 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1352 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1345 if i in m:
1353 if i in m:
1346 if f <= 2:
1354 if f <= 2:
1347 self.ui.debug("found new branch changeset %s\n" %
1355 self.ui.debug("found new branch changeset %s\n" %
1348 short(p))
1356 short(p))
1349 fetch.add(p)
1357 fetch.add(p)
1350 base[i] = 1
1358 base[i] = 1
1351 else:
1359 else:
1352 self.ui.debug("narrowed branch search to %s:%s\n"
1360 self.ui.debug("narrowed branch search to %s:%s\n"
1353 % (short(p), short(i)))
1361 % (short(p), short(i)))
1354 newsearch.append((p, i))
1362 newsearch.append((p, i))
1355 break
1363 break
1356 p, f = i, f * 2
1364 p, f = i, f * 2
1357 search = newsearch
1365 search = newsearch
1358
1366
1359 # sanity check our fetch list
1367 # sanity check our fetch list
1360 for f in fetch:
1368 for f in fetch:
1361 if f in m:
1369 if f in m:
1362 raise error.RepoError(_("already have changeset ")
1370 raise error.RepoError(_("already have changeset ")
1363 + short(f[:4]))
1371 + short(f[:4]))
1364
1372
1365 if base.keys() == [nullid]:
1373 if base.keys() == [nullid]:
1366 if force:
1374 if force:
1367 self.ui.warn(_("warning: repository is unrelated\n"))
1375 self.ui.warn(_("warning: repository is unrelated\n"))
1368 else:
1376 else:
1369 raise util.Abort(_("repository is unrelated"))
1377 raise util.Abort(_("repository is unrelated"))
1370
1378
1371 self.ui.debug("found new changesets starting at " +
1379 self.ui.debug("found new changesets starting at " +
1372 " ".join([short(f) for f in fetch]) + "\n")
1380 " ".join([short(f) for f in fetch]) + "\n")
1373
1381
1374 self.ui.debug("%d total queries\n" % reqcnt)
1382 self.ui.debug("%d total queries\n" % reqcnt)
1375
1383
1376 return base.keys(), list(fetch), heads
1384 return base.keys(), list(fetch), heads
1377
1385
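# A hedged usage sketch for the discovery helpers above.  findcommonincoming()
# returns (common nodes, roots of the missing subsets, remote heads), and
# findincoming() is simply its second element.  The helper name is
# hypothetical; repo and remote are assumed to be a localrepository and a
# peer repository object.
from mercurial.node import nullid

def describe_incoming(repo, remote):
    common, fetch, rheads = repo.findcommonincoming(remote)
    if not fetch:
        print("no changes found")
    elif fetch == [nullid]:
        print("requesting all changes")
    else:
        print("%d missing roots, %d remote heads" % (len(fetch), len(rheads)))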
1378 def findoutgoing(self, remote, base=None, heads=None, force=False):
1386 def findoutgoing(self, remote, base=None, heads=None, force=False):
1379 """Return list of nodes that are roots of subsets not in remote
1387 """Return list of nodes that are roots of subsets not in remote
1380
1388
1381 If base dict is specified, assume that these nodes and their parents
1389 If base dict is specified, assume that these nodes and their parents
1382 exist on the remote side.
1390 exist on the remote side.
1383 If a list of heads is specified, return only nodes which are heads
1391 If a list of heads is specified, return only nodes which are heads
1384 or ancestors of these heads, and return a second element which
1392 or ancestors of these heads, and return a second element which
1385 contains all remote heads which get new children.
1393 contains all remote heads which get new children.
1386 """
1394 """
1387 if base is None:
1395 if base is None:
1388 base = {}
1396 base = {}
1389 self.findincoming(remote, base, heads, force=force)
1397 self.findincoming(remote, base, heads, force=force)
1390
1398
1391 self.ui.debug("common changesets up to "
1399 self.ui.debug("common changesets up to "
1392 + " ".join(map(short, base.keys())) + "\n")
1400 + " ".join(map(short, base.keys())) + "\n")
1393
1401
1394 remain = set(self.changelog.nodemap)
1402 remain = set(self.changelog.nodemap)
1395
1403
1396 # prune everything remote has from the tree
1404 # prune everything remote has from the tree
1397 remain.remove(nullid)
1405 remain.remove(nullid)
1398 remove = base.keys()
1406 remove = base.keys()
1399 while remove:
1407 while remove:
1400 n = remove.pop(0)
1408 n = remove.pop(0)
1401 if n in remain:
1409 if n in remain:
1402 remain.remove(n)
1410 remain.remove(n)
1403 for p in self.changelog.parents(n):
1411 for p in self.changelog.parents(n):
1404 remove.append(p)
1412 remove.append(p)
1405
1413
1406 # find every node whose parents have been pruned
1414 # find every node whose parents have been pruned
1407 subset = []
1415 subset = []
1408 # find every remote head that will get new children
1416 # find every remote head that will get new children
1409 updated_heads = set()
1417 updated_heads = set()
1410 for n in remain:
1418 for n in remain:
1411 p1, p2 = self.changelog.parents(n)
1419 p1, p2 = self.changelog.parents(n)
1412 if p1 not in remain and p2 not in remain:
1420 if p1 not in remain and p2 not in remain:
1413 subset.append(n)
1421 subset.append(n)
1414 if heads:
1422 if heads:
1415 if p1 in heads:
1423 if p1 in heads:
1416 updated_heads.add(p1)
1424 updated_heads.add(p1)
1417 if p2 in heads:
1425 if p2 in heads:
1418 updated_heads.add(p2)
1426 updated_heads.add(p2)
1419
1427
1420 # this is the set of all roots we have to push
1428 # this is the set of all roots we have to push
1421 if heads:
1429 if heads:
1422 return subset, list(updated_heads)
1430 return subset, list(updated_heads)
1423 else:
1431 else:
1424 return subset
1432 return subset
1425
1433
1426 def pull(self, remote, heads=None, force=False):
1434 def pull(self, remote, heads=None, force=False):
1427 lock = self.lock()
1435 lock = self.lock()
1428 try:
1436 try:
1429 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1437 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1430 force=force)
1438 force=force)
1431 if fetch == [nullid]:
1439 if fetch == [nullid]:
1432 self.ui.status(_("requesting all changes\n"))
1440 self.ui.status(_("requesting all changes\n"))
1433
1441
1434 if not fetch:
1442 if not fetch:
1435 self.ui.status(_("no changes found\n"))
1443 self.ui.status(_("no changes found\n"))
1436 return 0
1444 return 0
1437
1445
1438 if heads is None and remote.capable('changegroupsubset'):
1446 if heads is None and remote.capable('changegroupsubset'):
1439 heads = rheads
1447 heads = rheads
1440
1448
1441 if heads is None:
1449 if heads is None:
1442 cg = remote.changegroup(fetch, 'pull')
1450 cg = remote.changegroup(fetch, 'pull')
1443 else:
1451 else:
1444 if not remote.capable('changegroupsubset'):
1452 if not remote.capable('changegroupsubset'):
1445 raise util.Abort(_("Partial pull cannot be done because "
1453 raise util.Abort(_("Partial pull cannot be done because "
1446 "other repository doesn't support "
1454 "other repository doesn't support "
1447 "changegroupsubset."))
1455 "changegroupsubset."))
1448 cg = remote.changegroupsubset(fetch, heads, 'pull')
1456 cg = remote.changegroupsubset(fetch, heads, 'pull')
1449 return self.addchangegroup(cg, 'pull', remote.url())
1457 return self.addchangegroup(cg, 'pull', remote.url())
1450 finally:
1458 finally:
1451 lock.release()
1459 lock.release()
1452
1460
1453 def push(self, remote, force=False, revs=None):
1461 def push(self, remote, force=False, revs=None):
1454 # there are two ways to push to remote repo:
1462 # there are two ways to push to remote repo:
1455 #
1463 #
1456 # addchangegroup assumes local user can lock remote
1464 # addchangegroup assumes local user can lock remote
1457 # repo (local filesystem, old ssh servers).
1465 # repo (local filesystem, old ssh servers).
1458 #
1466 #
1459 # unbundle assumes local user cannot lock remote repo (new ssh
1467 # unbundle assumes local user cannot lock remote repo (new ssh
1460 # servers, http servers).
1468 # servers, http servers).
1461
1469
1462 if remote.capable('unbundle'):
1470 if remote.capable('unbundle'):
1463 return self.push_unbundle(remote, force, revs)
1471 return self.push_unbundle(remote, force, revs)
1464 return self.push_addchangegroup(remote, force, revs)
1472 return self.push_addchangegroup(remote, force, revs)
1465
1473
1466 def prepush(self, remote, force, revs):
1474 def prepush(self, remote, force, revs):
1467 '''Analyze the local and remote repositories and determine which
1475 '''Analyze the local and remote repositories and determine which
1468 changesets need to be pushed to the remote. Return a tuple
1476 changesets need to be pushed to the remote. Return a tuple
1469 (changegroup, remoteheads). changegroup is a readable file-like
1477 (changegroup, remoteheads). changegroup is a readable file-like
1470 object whose read() returns successive changegroup chunks ready to
1478 object whose read() returns successive changegroup chunks ready to
1471 be sent over the wire. remoteheads is the list of remote heads.
1479 be sent over the wire. remoteheads is the list of remote heads.
1472 '''
1480 '''
1473 common = {}
1481 common = {}
1474 remote_heads = remote.heads()
1482 remote_heads = remote.heads()
1475 inc = self.findincoming(remote, common, remote_heads, force=force)
1483 inc = self.findincoming(remote, common, remote_heads, force=force)
1476
1484
1477 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1485 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1478 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1486 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1479
1487
1480 def checkbranch(lheads, rheads, updatelb):
1488 def checkbranch(lheads, rheads, updatelb):
1481 '''
1489 '''
1482 check whether there are more local heads than remote heads on
1490 check whether there are more local heads than remote heads on
1483 a specific branch.
1491 a specific branch.
1484
1492
1485 lheads: local branch heads
1493 lheads: local branch heads
1486 rheads: remote branch heads
1494 rheads: remote branch heads
1487 updatelb: outgoing local branch bases
1495 updatelb: outgoing local branch bases
1488 '''
1496 '''
1489
1497
1490 warn = 0
1498 warn = 0
1491
1499
1492 if not revs and len(lheads) > len(rheads):
1500 if not revs and len(lheads) > len(rheads):
1493 warn = 1
1501 warn = 1
1494 else:
1502 else:
1495 # add local heads involved in the push
1503 # add local heads involved in the push
1496 updatelheads = [self.changelog.heads(x, lheads)
1504 updatelheads = [self.changelog.heads(x, lheads)
1497 for x in updatelb]
1505 for x in updatelb]
1498 newheads = set(sum(updatelheads, [])) & set(lheads)
1506 newheads = set(sum(updatelheads, [])) & set(lheads)
1499
1507
1500 if not newheads:
1508 if not newheads:
1501 return True
1509 return True
1502
1510
1503 # add heads we don't have or that are not involved in the push
1511 # add heads we don't have or that are not involved in the push
1504 for r in rheads:
1512 for r in rheads:
1505 if r in self.changelog.nodemap:
1513 if r in self.changelog.nodemap:
1506 desc = self.changelog.heads(r, heads)
1514 desc = self.changelog.heads(r, heads)
1507 l = [h for h in heads if h in desc]
1515 l = [h for h in heads if h in desc]
1508 if not l:
1516 if not l:
1509 newheads.add(r)
1517 newheads.add(r)
1510 else:
1518 else:
1511 newheads.add(r)
1519 newheads.add(r)
1512 if len(newheads) > len(rheads):
1520 if len(newheads) > len(rheads):
1513 warn = 1
1521 warn = 1
1514
1522
1515 if warn:
1523 if warn:
1516 if not rheads: # new branch requires --force
1524 if not rheads: # new branch requires --force
1517 self.ui.warn(_("abort: push creates new"
1525 self.ui.warn(_("abort: push creates new"
1518 " remote branch '%s'!\n") %
1526 " remote branch '%s'!\n") %
1519 self[updatelb[0]].branch())
1527 self[updatelb[0]].branch())
1520 else:
1528 else:
1521 self.ui.warn(_("abort: push creates new remote heads!\n"))
1529 self.ui.warn(_("abort: push creates new remote heads!\n"))
1522
1530
1523 self.ui.status(_("(did you forget to merge?"
1531 self.ui.status(_("(did you forget to merge?"
1524 " use push -f to force)\n"))
1532 " use push -f to force)\n"))
1525 return False
1533 return False
1526 return True
1534 return True
1527
1535
1528 if not bases:
1536 if not bases:
1529 self.ui.status(_("no changes found\n"))
1537 self.ui.status(_("no changes found\n"))
1530 return None, 1
1538 return None, 1
1531 elif not force:
1539 elif not force:
1532 # Check for each named branch if we're creating new remote heads.
1540 # Check for each named branch if we're creating new remote heads.
1533 # To be a remote head after push, node must be either:
1541 # To be a remote head after push, node must be either:
1534 # - unknown locally
1542 # - unknown locally
1535 # - a local outgoing head descended from update
1543 # - a local outgoing head descended from update
1536 # - a remote head that's known locally and not
1544 # - a remote head that's known locally and not
1537 # ancestral to an outgoing head
1545 # ancestral to an outgoing head
1538 #
1546 #
1539 # New named branches cannot be created without --force.
1547 # New named branches cannot be created without --force.
1540
1548
1541 if remote_heads != [nullid]:
1549 if remote_heads != [nullid]:
1542 if remote.capable('branchmap'):
1550 if remote.capable('branchmap'):
1543 localhds = {}
1551 localhds = {}
1544 if not revs:
1552 if not revs:
1545 localhds = self.branchmap()
1553 localhds = self.branchmap()
1546 else:
1554 else:
1547 for n in heads:
1555 for n in heads:
1548 branch = self[n].branch()
1556 branch = self[n].branch()
1549 if branch in localhds:
1557 if branch in localhds:
1550 localhds[branch].append(n)
1558 localhds[branch].append(n)
1551 else:
1559 else:
1552 localhds[branch] = [n]
1560 localhds[branch] = [n]
1553
1561
1554 remotehds = remote.branchmap()
1562 remotehds = remote.branchmap()
1555
1563
1556 for lh in localhds:
1564 for lh in localhds:
1557 if lh in remotehds:
1565 if lh in remotehds:
1558 rheads = remotehds[lh]
1566 rheads = remotehds[lh]
1559 else:
1567 else:
1560 rheads = []
1568 rheads = []
1561 lheads = localhds[lh]
1569 lheads = localhds[lh]
1562 updatelb = [upd for upd in update
1570 updatelb = [upd for upd in update
1563 if self[upd].branch() == lh]
1571 if self[upd].branch() == lh]
1564 if not updatelb:
1572 if not updatelb:
1565 continue
1573 continue
1566 if not checkbranch(lheads, rheads, updatelb):
1574 if not checkbranch(lheads, rheads, updatelb):
1567 return None, 0
1575 return None, 0
1568 else:
1576 else:
1569 if not checkbranch(heads, remote_heads, update):
1577 if not checkbranch(heads, remote_heads, update):
1570 return None, 0
1578 return None, 0
1571
1579
1572 if inc:
1580 if inc:
1573 self.ui.warn(_("note: unsynced remote changes!\n"))
1581 self.ui.warn(_("note: unsynced remote changes!\n"))
1574
1582
1575
1583
1576 if revs is None:
1584 if revs is None:
1577 # use the fast path, no race possible on push
1585 # use the fast path, no race possible on push
1578 cg = self._changegroup(common.keys(), 'push')
1586 cg = self._changegroup(common.keys(), 'push')
1579 else:
1587 else:
1580 cg = self.changegroupsubset(update, revs, 'push')
1588 cg = self.changegroupsubset(update, revs, 'push')
1581 return cg, remote_heads
1589 return cg, remote_heads
1582
1590
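# A much-simplified, standalone model of the per-branch safety check that
# prepush() applies above when the remote supports the branchmap capability:
# without --force a push must not create a new named branch on the remote,
# nor leave a branch with more heads than the remote already has.  The real
# check is more careful (it only counts heads actually involved in the push
# and remote heads unknown locally); the dicts below are toy data.
def push_allowed(local_heads, remote_heads):
    for branch, lheads in sorted(local_heads.items()):
        rheads = remote_heads.get(branch, [])
        if not rheads:
            print("abort: push creates new remote branch '%s'!" % branch)
            return False
        if len(lheads) > len(rheads):
            print("abort: push creates new remote heads!")
            return False
    return True

print(push_allowed({'default': ['a', 'b']}, {'default': ['a']}))  # -> False
print(push_allowed({'default': ['a']}, {'default': ['a']}))       # -> True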
1583 def push_addchangegroup(self, remote, force, revs):
1591 def push_addchangegroup(self, remote, force, revs):
1584 lock = remote.lock()
1592 lock = remote.lock()
1585 try:
1593 try:
1586 ret = self.prepush(remote, force, revs)
1594 ret = self.prepush(remote, force, revs)
1587 if ret[0] is not None:
1595 if ret[0] is not None:
1588 cg, remote_heads = ret
1596 cg, remote_heads = ret
1589 return remote.addchangegroup(cg, 'push', self.url())
1597 return remote.addchangegroup(cg, 'push', self.url())
1590 return ret[1]
1598 return ret[1]
1591 finally:
1599 finally:
1592 lock.release()
1600 lock.release()
1593
1601
1594 def push_unbundle(self, remote, force, revs):
1602 def push_unbundle(self, remote, force, revs):
1595 # local repo finds heads on server, finds out what revs it
1603 # local repo finds heads on server, finds out what revs it
1596 # must push. once revs transferred, if server finds it has
1604 # must push. once revs transferred, if server finds it has
1597 # different heads (someone else won commit/push race), server
1605 # different heads (someone else won commit/push race), server
1598 # aborts.
1606 # aborts.
1599
1607
1600 ret = self.prepush(remote, force, revs)
1608 ret = self.prepush(remote, force, revs)
1601 if ret[0] is not None:
1609 if ret[0] is not None:
1602 cg, remote_heads = ret
1610 cg, remote_heads = ret
1603 if force: remote_heads = ['force']
1611 if force: remote_heads = ['force']
1604 return remote.unbundle(cg, remote_heads, 'push')
1612 return remote.unbundle(cg, remote_heads, 'push')
1605 return ret[1]
1613 return ret[1]
1606
1614
1607 def changegroupinfo(self, nodes, source):
1615 def changegroupinfo(self, nodes, source):
1608 if self.ui.verbose or source == 'bundle':
1616 if self.ui.verbose or source == 'bundle':
1609 self.ui.status(_("%d changesets found\n") % len(nodes))
1617 self.ui.status(_("%d changesets found\n") % len(nodes))
1610 if self.ui.debugflag:
1618 if self.ui.debugflag:
1611 self.ui.debug("list of changesets:\n")
1619 self.ui.debug("list of changesets:\n")
1612 for node in nodes:
1620 for node in nodes:
1613 self.ui.debug("%s\n" % hex(node))
1621 self.ui.debug("%s\n" % hex(node))
1614
1622
1615 def changegroupsubset(self, bases, heads, source, extranodes=None):
1623 def changegroupsubset(self, bases, heads, source, extranodes=None):
1616 """Compute a changegroup consisting of all the nodes that are
1624 """Compute a changegroup consisting of all the nodes that are
1617 descendents of any of the bases and ancestors of any of the heads.
1625 descendents of any of the bases and ancestors of any of the heads.
1618 Return a chunkbuffer object whose read() method will return
1626 Return a chunkbuffer object whose read() method will return
1619 successive changegroup chunks.
1627 successive changegroup chunks.
1620
1628
1621 It is fairly complex as determining which filenodes and which
1629 It is fairly complex as determining which filenodes and which
1622 manifest nodes need to be included for the changeset to be complete
1630 manifest nodes need to be included for the changeset to be complete
1623 is non-trivial.
1631 is non-trivial.
1624
1632
1625 Another wrinkle is doing the reverse, figuring out which changeset in
1633 Another wrinkle is doing the reverse, figuring out which changeset in
1626 the changegroup a particular filenode or manifestnode belongs to.
1634 the changegroup a particular filenode or manifestnode belongs to.
1627
1635
1628 The caller can specify some nodes that must be included in the
1636 The caller can specify some nodes that must be included in the
1629 changegroup using the extranodes argument. It should be a dict
1637 changegroup using the extranodes argument. It should be a dict
1630 where the keys are the filenames (or 1 for the manifest), and the
1638 where the keys are the filenames (or 1 for the manifest), and the
1631 values are lists of (node, linknode) tuples, where node is a wanted
1639 values are lists of (node, linknode) tuples, where node is a wanted
1632 node and linknode is the changelog node that should be transmitted as
1640 node and linknode is the changelog node that should be transmitted as
1633 the linkrev.
1641 the linkrev.
1634 """
1642 """
1635
1643
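# Illustrative only: the shape of the extranodes argument described in the
# docstring above.  Keys are filenames (or 1 for the manifest); values are
# lists of (node, linknode) pairs.  Real values are binary node ids; the
# strings below are placeholders.
extranodes = {
    1: [('manifest-node-aa', 'changelog-node-01')],
    'foo/bar.txt': [('filenode-bb', 'changelog-node-01'),
                    ('filenode-cc', 'changelog-node-02')],
}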
1636 if extranodes is None:
1644 if extranodes is None:
1637 # can we go through the fast path ?
1645 # can we go through the fast path ?
1638 heads.sort()
1646 heads.sort()
1639 allheads = self.heads()
1647 allheads = self.heads()
1640 allheads.sort()
1648 allheads.sort()
1641 if heads == allheads:
1649 if heads == allheads:
1642 common = []
1650 common = []
1643 # parents of bases are known from both sides
1651 # parents of bases are known from both sides
1644 for n in bases:
1652 for n in bases:
1645 for p in self.changelog.parents(n):
1653 for p in self.changelog.parents(n):
1646 if p != nullid:
1654 if p != nullid:
1647 common.append(p)
1655 common.append(p)
1648 return self._changegroup(common, source)
1656 return self._changegroup(common, source)
1649
1657
1650 self.hook('preoutgoing', throw=True, source=source)
1658 self.hook('preoutgoing', throw=True, source=source)
1651
1659
1652 # Set up some initial variables
1660 # Set up some initial variables
1653 # Make it easy to refer to self.changelog
1661 # Make it easy to refer to self.changelog
1654 cl = self.changelog
1662 cl = self.changelog
1655 # msng is short for missing - compute the list of changesets in this
1663 # msng is short for missing - compute the list of changesets in this
1656 # changegroup.
1664 # changegroup.
1657 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1665 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1658 self.changegroupinfo(msng_cl_lst, source)
1666 self.changegroupinfo(msng_cl_lst, source)
1659 # Some bases may turn out to be superfluous, and some heads may be
1667 # Some bases may turn out to be superfluous, and some heads may be
1660 # too. nodesbetween will return the minimal set of bases and heads
1668 # too. nodesbetween will return the minimal set of bases and heads
1661 # necessary to re-create the changegroup.
1669 # necessary to re-create the changegroup.
1662
1670
1663 # Known heads are the list of heads that it is assumed the recipient
1671 # Known heads are the list of heads that it is assumed the recipient
1664 # of this changegroup will know about.
1672 # of this changegroup will know about.
1665 knownheads = set()
1673 knownheads = set()
1666 # We assume that all parents of bases are known heads.
1674 # We assume that all parents of bases are known heads.
1667 for n in bases:
1675 for n in bases:
1668 knownheads.update(cl.parents(n))
1676 knownheads.update(cl.parents(n))
1669 knownheads.discard(nullid)
1677 knownheads.discard(nullid)
1670 knownheads = list(knownheads)
1678 knownheads = list(knownheads)
1671 if knownheads:
1679 if knownheads:
1672 # Now that we know what heads are known, we can compute which
1680 # Now that we know what heads are known, we can compute which
1673 # changesets are known. The recipient must know about all
1681 # changesets are known. The recipient must know about all
1674 # changesets required to reach the known heads from the null
1682 # changesets required to reach the known heads from the null
1675 # changeset.
1683 # changeset.
1676 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1684 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1677 junk = None
1685 junk = None
1678 # Transform the list into a set.
1686 # Transform the list into a set.
1679 has_cl_set = set(has_cl_set)
1687 has_cl_set = set(has_cl_set)
1680 else:
1688 else:
1681 # If there were no known heads, the recipient cannot be assumed to
1689 # If there were no known heads, the recipient cannot be assumed to
1682 # know about any changesets.
1690 # know about any changesets.
1683 has_cl_set = set()
1691 has_cl_set = set()
1684
1692
1685 # Make it easy to refer to self.manifest
1693 # Make it easy to refer to self.manifest
1686 mnfst = self.manifest
1694 mnfst = self.manifest
1687 # We don't know which manifests are missing yet
1695 # We don't know which manifests are missing yet
1688 msng_mnfst_set = {}
1696 msng_mnfst_set = {}
1689 # Nor do we know which filenodes are missing.
1697 # Nor do we know which filenodes are missing.
1690 msng_filenode_set = {}
1698 msng_filenode_set = {}
1691
1699
1692 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1700 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1693 junk = None
1701 junk = None
1694
1702
1695 # A changeset always belongs to itself, so the changenode lookup
1703 # A changeset always belongs to itself, so the changenode lookup
1696 # function for a changenode is identity.
1704 # function for a changenode is identity.
1697 def identity(x):
1705 def identity(x):
1698 return x
1706 return x
1699
1707
1700 # If we determine that a particular file or manifest node must be a
1708 # If we determine that a particular file or manifest node must be a
1701 # node that the recipient of the changegroup will already have, we can
1709 # node that the recipient of the changegroup will already have, we can
1702 # also assume the recipient will have all the parents. This function
1710 # also assume the recipient will have all the parents. This function
1703 # prunes them from the set of missing nodes.
1711 # prunes them from the set of missing nodes.
1704 def prune_parents(revlog, hasset, msngset):
1712 def prune_parents(revlog, hasset, msngset):
1705 haslst = list(hasset)
1713 haslst = list(hasset)
1706 haslst.sort(key=revlog.rev)
1714 haslst.sort(key=revlog.rev)
1707 for node in haslst:
1715 for node in haslst:
1708 parentlst = [p for p in revlog.parents(node) if p != nullid]
1716 parentlst = [p for p in revlog.parents(node) if p != nullid]
1709 while parentlst:
1717 while parentlst:
1710 n = parentlst.pop()
1718 n = parentlst.pop()
1711 if n not in hasset:
1719 if n not in hasset:
1712 hasset.add(n)
1720 hasset.add(n)
1713 p = [p for p in revlog.parents(n) if p != nullid]
1721 p = [p for p in revlog.parents(n) if p != nullid]
1714 parentlst.extend(p)
1722 parentlst.extend(p)
1715 for n in hasset:
1723 for n in hasset:
1716 msngset.pop(n, None)
1724 msngset.pop(n, None)
1717
1725
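# Standalone sketch of what prune_parents() above does: once a node is known
# to exist on the receiving side, all of its ancestors must exist there too,
# so they are added to the 'has' set and dropped from the missing map.  The
# toy graph maps node -> list of parents instead of using a revlog.
def prune(parents, hasset, msngset):
    stack = list(hasset)
    while stack:
        n = stack.pop()
        for p in parents.get(n, []):
            if p not in hasset:
                hasset.add(p)
                stack.append(p)
    for n in hasset:
        msngset.pop(n, None)

parents = {'d': ['c'], 'c': ['b'], 'b': ['a'], 'a': []}
hasset = set(['c'])
msngset = {'a': 1, 'b': 1, 'c': 1, 'd': 1}
prune(parents, hasset, msngset)
print(sorted(msngset))   # -> ['d']; a, b and c are assumed present remotely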
1718 # This is a function generating function used to set up an environment
1726 # This is a function generating function used to set up an environment
1719 # for the inner function to execute in.
1727 # for the inner function to execute in.
1720 def manifest_and_file_collector(changedfileset):
1728 def manifest_and_file_collector(changedfileset):
1721 # This is an information gathering function that gathers
1729 # This is an information gathering function that gathers
1722 # information from each changeset node that goes out as part of
1730 # information from each changeset node that goes out as part of
1723 # the changegroup. The information gathered is a list of which
1731 # the changegroup. The information gathered is a list of which
1724 # manifest nodes are potentially required (the recipient may
1732 # manifest nodes are potentially required (the recipient may
1725 # already have them) and total list of all files which were
1733 # already have them) and total list of all files which were
1726 # changed in any changeset in the changegroup.
1734 # changed in any changeset in the changegroup.
1727 #
1735 #
1728 # We also remember the first changenode we saw any manifest
1736 # We also remember the first changenode we saw any manifest
1729 # referenced by so we can later determine which changenode 'owns'
1737 # referenced by so we can later determine which changenode 'owns'
1730 # the manifest.
1738 # the manifest.
1731 def collect_manifests_and_files(clnode):
1739 def collect_manifests_and_files(clnode):
1732 c = cl.read(clnode)
1740 c = cl.read(clnode)
1733 for f in c[3]:
1741 for f in c[3]:
1734 # This is to make sure we only have one instance of each
1742 # This is to make sure we only have one instance of each
1735 # filename string for each filename.
1743 # filename string for each filename.
1736 changedfileset.setdefault(f, f)
1744 changedfileset.setdefault(f, f)
1737 msng_mnfst_set.setdefault(c[0], clnode)
1745 msng_mnfst_set.setdefault(c[0], clnode)
1738 return collect_manifests_and_files
1746 return collect_manifests_and_files
1739
1747
1740 # Figure out which manifest nodes (of the ones we think might be part
1748 # Figure out which manifest nodes (of the ones we think might be part
1741 # of the changegroup) the recipient must know about and remove them
1749 # of the changegroup) the recipient must know about and remove them
1742 # from the changegroup.
1750 # from the changegroup.
1743 def prune_manifests():
1751 def prune_manifests():
1744 has_mnfst_set = set()
1752 has_mnfst_set = set()
1745 for n in msng_mnfst_set:
1753 for n in msng_mnfst_set:
1746 # If a 'missing' manifest thinks it belongs to a changenode
1754 # If a 'missing' manifest thinks it belongs to a changenode
1747 # the recipient is assumed to have, obviously the recipient
1755 # the recipient is assumed to have, obviously the recipient
1748 # must have that manifest.
1756 # must have that manifest.
1749 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1757 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1750 if linknode in has_cl_set:
1758 if linknode in has_cl_set:
1751 has_mnfst_set.add(n)
1759 has_mnfst_set.add(n)
1752 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1760 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1753
1761
1754 # Use the information collected in collect_manifests_and_files to say
1762 # Use the information collected in collect_manifests_and_files to say
1755 # which changenode any manifestnode belongs to.
1763 # which changenode any manifestnode belongs to.
1756 def lookup_manifest_link(mnfstnode):
1764 def lookup_manifest_link(mnfstnode):
1757 return msng_mnfst_set[mnfstnode]
1765 return msng_mnfst_set[mnfstnode]
1758
1766
1759 # A function generating function that sets up the initial environment
1767 # A function generating function that sets up the initial environment
1760 # for the inner function.
1768 # for the inner function.
1761 def filenode_collector(changedfiles):
1769 def filenode_collector(changedfiles):
1762 next_rev = [0]
1770 next_rev = [0]
1763 # This gathers information from each manifestnode included in the
1771 # This gathers information from each manifestnode included in the
1764 # changegroup about which filenodes the manifest node references
1772 # changegroup about which filenodes the manifest node references
1765 # so we can include those in the changegroup too.
1773 # so we can include those in the changegroup too.
1766 #
1774 #
1767 # It also remembers which changenode each filenode belongs to. It
1775 # It also remembers which changenode each filenode belongs to. It
1768 # does this by assuming a filenode belongs to the changenode
1776 # does this by assuming a filenode belongs to the changenode
1769 # the first manifest that references it belongs to.
1777 # the first manifest that references it belongs to.
1770 def collect_msng_filenodes(mnfstnode):
1778 def collect_msng_filenodes(mnfstnode):
1771 r = mnfst.rev(mnfstnode)
1779 r = mnfst.rev(mnfstnode)
1772 if r == next_rev[0]:
1780 if r == next_rev[0]:
1773 # If the last rev we looked at was the one just previous,
1781 # If the last rev we looked at was the one just previous,
1774 # we only need to see a diff.
1782 # we only need to see a diff.
1775 deltamf = mnfst.readdelta(mnfstnode)
1783 deltamf = mnfst.readdelta(mnfstnode)
1776 # For each line in the delta
1784 # For each line in the delta
1777 for f, fnode in deltamf.iteritems():
1785 for f, fnode in deltamf.iteritems():
1778 f = changedfiles.get(f, None)
1786 f = changedfiles.get(f, None)
1779 # And if the file is in the list of files we care
1787 # And if the file is in the list of files we care
1780 # about.
1788 # about.
1781 if f is not None:
1789 if f is not None:
1782 # Get the changenode this manifest belongs to
1790 # Get the changenode this manifest belongs to
1783 clnode = msng_mnfst_set[mnfstnode]
1791 clnode = msng_mnfst_set[mnfstnode]
1784 # Create the set of filenodes for the file if
1792 # Create the set of filenodes for the file if
1785 # there isn't one already.
1793 # there isn't one already.
1786 ndset = msng_filenode_set.setdefault(f, {})
1794 ndset = msng_filenode_set.setdefault(f, {})
1787 # And set the filenode's changelog node to the
1795 # And set the filenode's changelog node to the
1788 # manifest's if it hasn't been set already.
1796 # manifest's if it hasn't been set already.
1789 ndset.setdefault(fnode, clnode)
1797 ndset.setdefault(fnode, clnode)
1790 else:
1798 else:
1791 # Otherwise we need a full manifest.
1799 # Otherwise we need a full manifest.
1792 m = mnfst.read(mnfstnode)
1800 m = mnfst.read(mnfstnode)
1793 # For every file in we care about.
1801 # For every file in we care about.
1794 for f in changedfiles:
1802 for f in changedfiles:
1795 fnode = m.get(f, None)
1803 fnode = m.get(f, None)
1796 # If it's in the manifest
1804 # If it's in the manifest
1797 if fnode is not None:
1805 if fnode is not None:
1798 # See comments above.
1806 # See comments above.
1799 clnode = msng_mnfst_set[mnfstnode]
1807 clnode = msng_mnfst_set[mnfstnode]
1800 ndset = msng_filenode_set.setdefault(f, {})
1808 ndset = msng_filenode_set.setdefault(f, {})
1801 ndset.setdefault(fnode, clnode)
1809 ndset.setdefault(fnode, clnode)
1802 # Remember the revision we hope to see next.
1810 # Remember the revision we hope to see next.
1803 next_rev[0] = r + 1
1811 next_rev[0] = r + 1
1804 return collect_msng_filenodes
1812 return collect_msng_filenodes
1805
1813
        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
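    # Illustrative sketch, not from the original source: the chunkbuffer
    # returned above is typically drained in fixed-size reads by the caller
    # (wire protocol or bundle writer), roughly like
    #
    #     cg = repo.changegroupsubset(bases, heads, 'bundle')
    #     while 1:
    #         chunk = cg.read(4096)
    #         if not chunk:
    #             break
    #         outfile.write(chunk)
    #
    # "bases", "heads" and "outfile" are hypothetical names used only for
    # illustration here.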

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient already has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

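        # Yield the nodes of the given revlog whose linkrev falls within the
        # set of outgoing changelog revisions computed above.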
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

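        # Factory for a callback that records, for each changeset sent, the
        # files it touched (field 3 of the changelog entry).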
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

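        # Factory for a lookup function that maps a node of the given revlog
        # back to the changelog node that introduced it (via its linkrev).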
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
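    # Illustrative sketch, not from the original source: a push-style caller
    # that already knows which nodes both sides share could use this method
    # roughly as
    #
    #     common = set(shared_nodes)
    #     cg = repo._changegroup(common, 'push')
    #     remote.unbundle(cg, remote_heads, 'push')
    #
    # "shared_nodes" and "remote_heads" are hypothetical names; the real
    # negotiation is done by the push/pull discovery code.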

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
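        # csmap logs each incoming changeset and returns the revision number
        # it will be assigned; revmap maps a known changelog node to its
        # revision number. Both are handed to the addgroup() calls below as
        # link-revision mappers.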
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
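    # Illustrative sketch, not from the original source: callers such as
    # pull() treat the return value as a "modified heads" indicator, roughly
    #
    #     modheads = repo.addchangegroup(source, 'pull', remote.url())
    #     if modheads == 0:
    #         pass            # nothing was added
    #     elif modheads > 1:
    #         pass            # modheads - 1 new heads appeared
    #
    # See the docstring above for the exact meaning of each value.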
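    # stream_in copies the remote store verbatim: after a status line, the
    # remote sends the file count and total byte size, then each store file
    # as "name\0size" followed by the raw file data, which is written
    # straight into the local store.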
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
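    # Illustrative sketch, not from the original source: the higher-level
    # clone code in mercurial.hg ends up invoking something like
    #
    #     dest_repo.clone(src_repo, heads=revs, stream=uncompressed)
    #
    # where "revs" and "uncompressed" stand for the --rev and --uncompressed
    # clone options; both names are used here only for illustration.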

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
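# aftertrans is handed to transaction.transaction() as the 'after' callback,
# so the journal files are renamed to their undo counterparts once the
# transaction is closed.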

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True