Add comments to example profiles

parent 9ad48a0d44
commit d70957ccd8

3 changed files with 27 additions and 0 deletions
@@ -4,6 +4,7 @@ import requests
 import codecs
 
 
+# Reading the dataset passport to determine a URL of the latest dataset version
 def download_url(dataset_id='7705851331-museums'):
     r = requests.get('http://opendata.mkrf.ru/opendata/{}/meta.json'.format(dataset_id))
     if r.status_code != 200 or len(r.content) == 0:
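The new comment describes what download_url does: it fetches the dataset passport (meta.json) and derives the address of the latest dataset version. A minimal sketch of how the rest of the function could look; the 'data', 'created' and 'source' field names are guesses for illustration, not the documented opendata.mkrf.ru schema:

import requests

def download_url(dataset_id='7705851331-museums'):
    r = requests.get('http://opendata.mkrf.ru/opendata/{}/meta.json'.format(dataset_id))
    if r.status_code != 200 or len(r.content) == 0:
        return None
    meta = r.json()
    # 'data', 'created' and 'source' are assumed field names
    latest = max(meta['data'], key=lambda version: version['created'])
    return latest['source']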
@@ -24,6 +25,7 @@ master_tags = ('official_name', 'phone', 'opening_hours', 'website')
 
 def dataset(fileobj):
     def make_wd_ranges(r):
+        """Converts e.g. [0,1,4] into 'Mo-Tu, Fr'."""
         wd = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']
         res = wd[r[0]]
         in_range = False
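The docstring added here pins down the helper's contract. For reference, a self-contained sketch that satisfies it by collapsing consecutive weekday indices into ranges; this is a plausible reconstruction, not the profile's actual body:

def make_wd_ranges(r):
    """Converts e.g. [0,1,4] into 'Mo-Tu, Fr'."""
    wd = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']
    ranges = []  # list of (first, last) weekday index pairs
    start = prev = r[0]
    for day in r[1:]:
        if day != prev + 1:
            # a gap ends the current range
            ranges.append((start, prev))
            start = day
        prev = day
    ranges.append((start, prev))
    return ', '.join(
        wd[a] if a == b else '{}-{}'.format(wd[a], wd[b])
        for a, b in ranges)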
@@ -39,6 +41,8 @@ def dataset(fileobj):
         return res
 
     def parse_hours(h):
+        """Receives a dict {'0': {'from': '10:00:00', 'to': '18:00:00'}, ...}
+        and returns a proper opening_hours value."""
         days = {}
         for wd, d in h.items():
             if not d['from']:
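Again the docstring states the contract. One plausible way to meet it is to group weekdays by identical opening interval and feed each group through make_wd_ranges; a sketch under that assumption, not the actual function body from the diff:

def parse_hours(h):
    """Receives a dict {'0': {'from': '10:00:00', 'to': '18:00:00'}, ...}
    and returns a proper opening_hours value."""
    days = {}  # maps 'HH:MM-HH:MM' interval -> list of weekday indices
    for wd, d in h.items():
        if not d['from']:
            continue  # no opening time recorded: treat the day as closed
        interval = '{}-{}'.format(d['from'][:5], d['to'][:5])
        days.setdefault(interval, []).append(int(wd))
    return '; '.join(
        '{} {}'.format(make_wd_ranges(sorted(wds)), interval)
        for interval, wds in sorted(days.items(), key=lambda kv: min(kv[1])))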
@@ -1,7 +1,14 @@
+# This profile reads a prepared JSON, thus no "dataset" function
+
+# Value for the changeset "source" tag
 source = 'Navads'
+# Keeping identifiers in a "ref:navads_shell" tag
 dataset_id = 'navads_shell'
+# Overpass API query is a simple [amenity="fuel"]
 query = [('amenity', 'fuel')]
+# These tag values override values on OSM objects
 master_tags = ('brand', 'addr:postcode', 'phone', 'opening_hours')
+# Looking at most 50 meters around a dataset point
 max_distance = 50
 
 
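These module-level names are the whole interface of a profile: the conflation tool imports the file and reads whichever attributes are defined. A hypothetical illustration of the consuming side; load_profile and its defaults are invented for this sketch, not the tool's actual code:

import importlib.util

def load_profile(path):
    # Execute a profile file as a module and collect its settings
    spec = importlib.util.spec_from_file_location('profile', path)
    profile = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(profile)
    return {
        'source': getattr(profile, 'source', None),
        'dataset_id': getattr(profile, 'dataset_id', None),
        'query': getattr(profile, 'query', []),
        'master_tags': getattr(profile, 'master_tags', ()),
        'max_distance': getattr(profile, 'max_distance', 100),
    }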
@@ -16,10 +23,15 @@ def format_phone(ph):
     return ph
 
 
+# Tag transformation
 transform = {
+    # Just add this tag
     'amenity': 'fuel',
+    # Rename key
     'postal_code': '>addr:postcode',
+    # Use a function to transform a value
     'phone': format_phone,
+    # Remove this tag
     'name': '-'
 }
 
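The new comments spell out the four transform conventions: a plain value sets a tag, a '>key' value renames the key, a callable rewrites the value, and '-' removes the tag. A sketch of how a conflator could apply such a dict to a point's tags; apply_transform is illustrative, not the tool's actual implementation:

def apply_transform(transform, tags):
    for key, value in transform.items():
        if callable(value):
            # 'phone': format_phone, rewrite the value with a function
            if key in tags:
                tags[key] = value(tags[key])
        elif value == '-':
            # 'name': '-', remove the tag
            tags.pop(key, None)
        elif value.startswith('>'):
            # 'postal_code': '>addr:postcode', rename the key
            if key in tags:
                tags[value[1:]] = tags.pop(key)
        else:
            # 'amenity': 'fuel', just set the tag
            tags[key] = value
    return tags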
@@ -2,23 +2,34 @@ import codecs
 import json
 import logging
 
+# Where to get the latest feed
 download_url = 'http://www.velobike.ru/proxy/parkings/'
+# What to write for the changeset's source tag
 source = 'velobike.ru'
+# These two lines negate each other:
 dataset_id = 'velobike'
+# We actually do not use ref:velobike tag
 no_dataset_id = True
+# Overpass API query: [amenity="bicycle_rental"][network="Велобайк"]
 query = [('amenity', 'bicycle_rental'), ('network', 'Велобайк')]
+# Maximum lookup radius is 100 meters
 max_distance = 100
+# The overpass query chooses all relevant points,
+# so points that are not in the dataset should be deleted
 delete_unmatched = True
+# If delete_unmatched were False, we'd be retagging these parkings:
 tag_unmatched = {
     'fixme': 'Проверить на местности: в данных велобайка отсутствует. Вероятно, демонтирована',
     'amenity': None,
     'was:amenity': 'bicycle_rental'
 }
+# Overwriting these tags
 master_tags = ('ref', 'capacity', 'capacity:electric', 'contact:email',
                'contact:phone', 'contact:website', 'operator')
 
 
 def dataset(fileobj):
+    # Specifying utf-8 is important, otherwise you'd get "bytes" instead of "str"
     source = json.load(codecs.getreader('utf-8')(fileobj))
     data = []
     for el in source['Items']:
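The utf-8 comment is the one substantive note in this file: json.load needs a text stream, so the raw byte stream is wrapped with codecs.getreader before parsing. A guess at how the loop over source['Items'] might continue; the 'Id', 'Position', 'Lat' and 'Lon' field names are assumptions about the velobike.ru feed, shown only to illustrate the shape of the loop:

import codecs
import json
import logging

def dataset(fileobj):
    # Specifying utf-8 is important, otherwise you'd get "bytes" instead of "str"
    source = json.load(codecs.getreader('utf-8')(fileobj))
    data = []
    for el in source['Items']:
        try:
            # 'Position', 'Lat' and 'Lon' are assumed feed fields
            lat = float(el['Position']['Lat'])
            lon = float(el['Position']['Lon'])
        except (KeyError, TypeError, ValueError):
            logging.warning('Parking %s has no usable coordinates', el.get('Id'))
            continue
        data.append((el.get('Id'), lat, lon))
    return data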