Tip
This procedure is part of a course that teaches you how to build a quickstart. If you haven't already, check out the course introduction.
Each procedure in this course builds on the last one, so make sure you complete the previous procedure, send events from your product, before starting this one.
Logs are generated by your application. They're time-based text records that help your users see what's happening in your system.
New Relic gives you a variety of ways to instrument your application to send logs to our Log API.
In this lesson, you learn how to send logs from your product using our telemetry software development kit (SDK).
import os
import random
import datetime
from sys import getsizeof

from newrelic_telemetry_sdk import MetricClient, GaugeMetric, CountMetric, SummaryMetric
from newrelic_telemetry_sdk import EventClient, Event

metric_client = MetricClient(os.environ["NEW_RELIC_LICENSE_KEY"])
event_client = EventClient(os.environ["NEW_RELIC_LICENSE_KEY"])

db = {}
stats = {
    "read_response_times": [],
    "read_errors": 0,
    "read_count": 0,
    "create_response_times": [],
    "create_errors": 0,
    "create_count": 0,
    "update_response_times": [],
    "update_errors": 0,
    "update_count": 0,
    "delete_response_times": [],
    "delete_errors": 0,
    "delete_count": 0,
    "cache_hit": 0,
}
last_push = {
    "read": datetime.datetime.now(),
    "create": datetime.datetime.now(),
    "update": datetime.datetime.now(),
    "delete": datetime.datetime.now(),
}

def read(key):
    print(f"Reading...")
    if random.randint(0, 30) > 10:
        stats["cache_hit"] += 1

    stats["read_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["read_errors"] += 1
    stats["read_count"] += 1
    try_send("read")

def create(key, value):
    print(f"Writing...")
    db[key] = value
    stats["create_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["create_errors"] += 1
    stats["create_count"] += 1
    try_send("create")

def update(key, value):
    print(f"Updating...")
    db[key] = value
    stats["update_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["update_errors"] += 1
    stats["update_count"] += 1
    try_send("update")

def delete(key):
    print(f"Deleting...")
    db.pop(key, None)
    stats["delete_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["delete_errors"] += 1
    stats["delete_count"] += 1
    try_send("delete")

def try_send(type_):
    print("try_send")
    now = datetime.datetime.now()
    interval_ms = (now - last_push[type_]).total_seconds() * 1000
    if interval_ms >= 2000:
        send_metrics(type_, interval_ms)
        send_event(type_)

def send_metrics(type_, interval_ms):
    print("sending metrics...")
    keys = GaugeMetric("fdb_keys", len(db))
    db_size = GaugeMetric("fdb_size", getsizeof(db))
    errors = CountMetric(
        name=f"fdb_{type_}_errors",
        value=stats[f"{type_}_errors"],
        interval_ms=interval_ms
    )
    cache_hits = CountMetric(
        name=f"fdb_cache_hits",
        value=stats["cache_hit"],
        interval_ms=interval_ms
    )
    response_times = stats[f"{type_}_response_times"]
    response_time_summary = SummaryMetric(
        f"fdb_{type_}_responses",
        count=len(response_times),
        min=min(response_times),
        max=max(response_times),
        sum=sum(response_times),
        interval_ms=interval_ms,
    )

    batch = [keys, db_size, errors, cache_hits, response_time_summary]
    response = metric_client.send_batch(batch)
    response.raise_for_status()
    print("Sent metrics successfully!")
    clear(type_)

def send_event(type_):
    print("sending event...")
    count = Event(
        "fdb_method", {"method": type_}
    )
    response = event_client.send_batch(count)
    response.raise_for_status()
    print("Event sent successfully!")

def clear(type_):
    stats[f"{type_}_response_times"] = []
    stats[f"{type_}_errors"] = 0
    stats["cache_hit"] = 0
    stats[f"{type_}_count"] = 0
    last_push[type_] = datetime.datetime.now()
Use our SDK
We offer an open source telemetry SDK in several of the most popular programming languages. These SDKs send data to our data ingest APIs, including our Log API. Of these language SDKs, the Python and Java ones work with the Log API.
In this lesson, you learn how to install and use the Python telemetry SDK to send logs to New Relic.
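As a quick preview, the smallest use of the SDK's log support looks roughly like this. It's a minimal sketch that assumes the newrelic-telemetry-sdk package is installed and your license key is exported; the steps below wire the same pieces into db.py.

import os
from newrelic_telemetry_sdk import Log, LogClient

# Create a log client with your license key and send a single log message.
log_client = LogClient(os.environ["NEW_RELIC_LICENSE_KEY"])
response = log_client.send(Log("Hello from the telemetry SDK!"))
response.raise_for_status()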
Change to the send-logs/flashDB directory of the course repository.
$cd ../../send-logs/flashDB
If you haven't already, install the newrelic-telemetry-sdk package.
$pip install newrelic-telemetry-sdk
Open db.py in the IDE of your choice and configure the LogClient.
import os
import random
import datetime
from sys import getsizeof

from newrelic_telemetry_sdk import MetricClient, GaugeMetric, CountMetric, SummaryMetric
from newrelic_telemetry_sdk import EventClient, Event
from newrelic_telemetry_sdk import LogClient

metric_client = MetricClient(os.environ["NEW_RELIC_LICENSE_KEY"])
event_client = EventClient(os.environ["NEW_RELIC_LICENSE_KEY"])
log_client = LogClient(os.environ["NEW_RELIC_LICENSE_KEY"])

db = {}
stats = {
    "read_response_times": [],
    "read_errors": 0,
    "read_count": 0,
    "create_response_times": [],
    "create_errors": 0,
    "create_count": 0,
    "update_response_times": [],
    "update_errors": 0,
    "update_count": 0,
    "delete_response_times": [],
    "delete_errors": 0,
    "delete_count": 0,
    "cache_hit": 0,
}
last_push = {
    "read": datetime.datetime.now(),
    "create": datetime.datetime.now(),
    "update": datetime.datetime.now(),
    "delete": datetime.datetime.now(),
}

def read(key):
    print(f"Reading...")
    if random.randint(0, 30) > 10:
        stats["cache_hit"] += 1

    stats["read_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["read_errors"] += 1
    stats["read_count"] += 1
    try_send("read")

def create(key, value):
    print(f"Writing...")
    db[key] = value
    stats["create_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["create_errors"] += 1
    stats["create_count"] += 1
    try_send("create")

def update(key, value):
    print(f"Updating...")
    db[key] = value
    stats["update_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["update_errors"] += 1
    stats["update_count"] += 1
    try_send("update")

def delete(key):
    print(f"Deleting...")
    db.pop(key, None)
    stats["delete_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["delete_errors"] += 1
    stats["delete_count"] += 1
    try_send("delete")

def try_send(type_):
    print("try_send")
    now = datetime.datetime.now()
    interval_ms = (now - last_push[type_]).total_seconds() * 1000
    if interval_ms >= 2000:
        send_metrics(type_, interval_ms)
        send_event(type_)

def send_metrics(type_, interval_ms):
    print("sending metrics...")
    keys = GaugeMetric("fdb_keys", len(db))
    db_size = GaugeMetric("fdb_size", getsizeof(db))
    errors = CountMetric(
        name=f"fdb_{type_}_errors",
        value=stats[f"{type_}_errors"],
        interval_ms=interval_ms
    )
    cache_hits = CountMetric(
        name=f"fdb_cache_hits",
        value=stats["cache_hit"],
        interval_ms=interval_ms
    )
    response_times = stats[f"{type_}_response_times"]
    response_time_summary = SummaryMetric(
        f"fdb_{type_}_responses",
        count=len(response_times),
        min=min(response_times),
        max=max(response_times),
        sum=sum(response_times),
        interval_ms=interval_ms,
    )

    batch = [keys, db_size, errors, cache_hits, response_time_summary]
    response = metric_client.send_batch(batch)
    response.raise_for_status()
    print("Sent metrics successfully!")
    clear(type_)

def send_event(type_):
    print("sending event...")
    count = Event(
        "fdb_method", {"method": type_}
    )
    response = event_client.send_batch(count)
    response.raise_for_status()
    print("Event sent successfully!")

def clear(type_):
    stats[f"{type_}_response_times"] = []
    stats[f"{type_}_errors"] = 0
    stats["cache_hit"] = 0
    stats[f"{type_}_count"] = 0
    last_push[type_] = datetime.datetime.now()
Important
This example expects an environment variable called $NEW_RELIC_LICENSE_KEY.
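If you're unsure whether the variable is set, a small check like the following (not part of the lab code) fails fast with a clear message instead of a KeyError:

import os

# db.py reads the key with os.environ["NEW_RELIC_LICENSE_KEY"], which raises
# a KeyError if the variable is missing. This check makes the failure explicit.
if not os.environ.get("NEW_RELIC_LICENSE_KEY"):
    raise RuntimeError("Set the NEW_RELIC_LICENSE_KEY environment variable before running db.py")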
Instrument your application to send logs to New Relic.
import os
import random
import datetime
from sys import getsizeof
import psutil

from newrelic_telemetry_sdk import MetricClient, GaugeMetric, CountMetric, SummaryMetric
from newrelic_telemetry_sdk import EventClient, Event
from newrelic_telemetry_sdk import LogClient, Log

metric_client = MetricClient(os.environ["NEW_RELIC_LICENSE_KEY"])
event_client = EventClient(os.environ["NEW_RELIC_LICENSE_KEY"])
log_client = LogClient(os.environ["NEW_RELIC_LICENSE_KEY"])

db = {}
stats = {
    "read_response_times": [],
    "read_errors": 0,
    "read_count": 0,
    "create_response_times": [],
    "create_errors": 0,
    "create_count": 0,
    "update_response_times": [],
    "update_errors": 0,
    "update_count": 0,
    "delete_response_times": [],
    "delete_errors": 0,
    "delete_count": 0,
    "cache_hit": 0,
}
last_push = {
    "read": datetime.datetime.now(),
    "create": datetime.datetime.now(),
    "update": datetime.datetime.now(),
    "delete": datetime.datetime.now(),
}

def read(key):
    print(f"Reading...")
    if random.randint(0, 30) > 10:
        stats["cache_hit"] += 1

    stats["read_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["read_errors"] += 1
    stats["read_count"] += 1
    try_send("read")

def create(key, value):
    print(f"Writing...")
    db[key] = value
    stats["create_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["create_errors"] += 1
    stats["create_count"] += 1
    try_send("create")

def update(key, value):
    print(f"Updating...")
    db[key] = value
    stats["update_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["update_errors"] += 1
    stats["update_count"] += 1
    try_send("update")

def delete(key):
    print(f"Deleting...")
    db.pop(key, None)
    stats["delete_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["delete_errors"] += 1
    stats["delete_count"] += 1
    try_send("delete")

def try_send(type_):
    print("try_send")
    now = datetime.datetime.now()
    interval_ms = (now - last_push[type_]).total_seconds() * 1000
    if interval_ms >= 2000:
        send_metrics(type_, interval_ms)
        send_event(type_)

def send_metrics(type_, interval_ms):
    print("sending metrics...")
    keys = GaugeMetric("fdb_keys", len(db))
    db_size = GaugeMetric("fdb_size", getsizeof(db))
    errors = CountMetric(
        name=f"fdb_{type_}_errors",
        value=stats[f"{type_}_errors"],
        interval_ms=interval_ms
    )
    cache_hits = CountMetric(
        name=f"fdb_cache_hits",
        value=stats["cache_hit"],
        interval_ms=interval_ms
    )
    response_times = stats[f"{type_}_response_times"]
    response_time_summary = SummaryMetric(
        f"fdb_{type_}_responses",
        count=len(response_times),
        min=min(response_times),
        max=max(response_times),
        sum=sum(response_times),
        interval_ms=interval_ms,
    )

    batch = [keys, db_size, errors, cache_hits, response_time_summary]
    response = metric_client.send_batch(batch)
    response.raise_for_status()
    print("Sent metrics successfully!")
    clear(type_)

def send_event(type_):
    print("sending event...")
    count = Event(
        "fdb_method", {"method": type_}
    )
    response = event_client.send_batch(count)
    response.raise_for_status()
    print("Event sent successfully!")

def send_logs():
    print("sending log...")
    process = psutil.Process(os.getpid())
    memory_usage = process.memory_percent()

    log = Log("FlashDB is using " + str(round(memory_usage * 100, 2)) + "% memory")

    response = log_client.send(log)
    response.raise_for_status()
    print("Log sent successfully!")

def clear(type_):
    stats[f"{type_}_response_times"] = []
    stats[f"{type_}_errors"] = 0
    stats["cache_hit"] = 0
    stats[f"{type_}_count"] = 0
    last_push[type_] = datetime.datetime.now()
Here, you instrument your platform to send memory_usage as a log to New Relic.
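If you want to inspect that value outside the lab code, this illustrative sketch prints what psutil reports for the current process; memory_percent() already returns a percentage of total system memory, which is the figure send_logs formats into its message.

import os
import psutil

# Inspect the current process, just as send_logs does.
process = psutil.Process(os.getpid())

# memory_percent() returns resident memory as a percentage of system memory.
print(f"Raw memory_percent(): {process.memory_percent():.4f}")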
Modify the try_send function to send logs every 2 seconds.
import os
import random
import datetime
from sys import getsizeof
import psutil

from newrelic_telemetry_sdk import MetricClient, GaugeMetric, CountMetric, SummaryMetric
from newrelic_telemetry_sdk import EventClient, Event
from newrelic_telemetry_sdk import LogClient, Log

metric_client = MetricClient(os.environ["NEW_RELIC_LICENSE_KEY"])
event_client = EventClient(os.environ["NEW_RELIC_LICENSE_KEY"])
log_client = LogClient(os.environ["NEW_RELIC_LICENSE_KEY"])

db = {}
stats = {
    "read_response_times": [],
    "read_errors": 0,
    "read_count": 0,
    "create_response_times": [],
    "create_errors": 0,
    "create_count": 0,
    "update_response_times": [],
    "update_errors": 0,
    "update_count": 0,
    "delete_response_times": [],
    "delete_errors": 0,
    "delete_count": 0,
    "cache_hit": 0,
}
last_push = {
    "read": datetime.datetime.now(),
    "create": datetime.datetime.now(),
    "update": datetime.datetime.now(),
    "delete": datetime.datetime.now(),
}

def read(key):
    print(f"Reading...")
    if random.randint(0, 30) > 10:
        stats["cache_hit"] += 1

    stats["read_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["read_errors"] += 1
    stats["read_count"] += 1
    try_send("read")

def create(key, value):
    print(f"Writing...")
    db[key] = value
    stats["create_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["create_errors"] += 1
    stats["create_count"] += 1
    try_send("create")

def update(key, value):
    print(f"Updating...")
    db[key] = value
    stats["update_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["update_errors"] += 1
    stats["update_count"] += 1
    try_send("update")

def delete(key):
    print(f"Deleting...")
    db.pop(key, None)
    stats["delete_response_times"].append(random.uniform(0.5, 1.0))
    if random.choice([True, False]):
        stats["delete_errors"] += 1
    stats["delete_count"] += 1
    try_send("delete")

def try_send(type_):
    print("try_send")
    now = datetime.datetime.now()
    interval_ms = (now - last_push[type_]).total_seconds() * 1000
    if interval_ms >= 2000:
        send_metrics(type_, interval_ms)
        send_event(type_)
        send_logs()

def send_metrics(type_, interval_ms):
    print("sending metrics...")
    keys = GaugeMetric("fdb_keys", len(db))
    db_size = GaugeMetric("fdb_size", getsizeof(db))
    errors = CountMetric(
        name=f"fdb_{type_}_errors",
        value=stats[f"{type_}_errors"],
        interval_ms=interval_ms
    )
    cache_hits = CountMetric(
        name=f"fdb_cache_hits",
        value=stats["cache_hit"],
        interval_ms=interval_ms
    )
    response_times = stats[f"{type_}_response_times"]
    response_time_summary = SummaryMetric(
        f"fdb_{type_}_responses",
        count=len(response_times),
        min=min(response_times),
        max=max(response_times),
        sum=sum(response_times),
        interval_ms=interval_ms,
    )

    batch = [keys, db_size, errors, cache_hits, response_time_summary]
    response = metric_client.send_batch(batch)
    response.raise_for_status()
    print("Sent metrics successfully!")
    clear(type_)

def send_event(type_):
    print("sending event...")
    count = Event(
        "fdb_method", {"method": type_}
    )
    response = event_client.send_batch(count)
    response.raise_for_status()
    print("Event sent successfully!")

def send_logs():
    print("sending log...")
    process = psutil.Process(os.getpid())
    memory_usage = process.memory_percent()

    log = Log("FlashDB is using " + str(round(memory_usage * 100, 2)) + "% memory")

    response = log_client.send(log)
    response.raise_for_status()
    print("Log sent successfully!")

def clear(type_):
    stats[f"{type_}_response_times"] = []
    stats[f"{type_}_errors"] = 0
    stats["cache_hit"] = 0
    stats[f"{type_}_count"] = 0
    last_push[type_] = datetime.datetime.now()
Your platform now reports the configured logs every 2 seconds.
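If it helps to see the timing gate in isolation, here's a minimal sketch of the pattern try_send relies on; the helper name is illustrative and not part of the lab code.

import datetime

def should_push(last_push_time, threshold_ms=2000):
    # Milliseconds elapsed since the last push for this operation type.
    interval_ms = (datetime.datetime.now() - last_push_time).total_seconds() * 1000
    return interval_ms >= threshold_ms

# try_send only sends metrics, events, and logs once at least 2 seconds have
# passed; clear() then resets last_push for that operation type.
if should_push(datetime.datetime.now() - datetime.timedelta(seconds=3)):
    print("2 seconds elapsed: send telemetry and reset last_push")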
Navigate to the root of your application at build-a-quickstart-lab/send-logs/flashDB.
Run your services to verify that they're reporting logs.
$python simulator.py
Writing...
try_send
Reading...
try_send
Reading...
try_send
Writing...
try_send
Writing...
try_send
Reading...
sending metrics...
Sent metrics successfully!
sending event...
Event sent successfully!
sending log...
Log sent successfully!
Alternative options
If the language SDKs don't fit your needs, try one of our other options:
- Log forwarders: New Relic offers a variety of log forwarding solutions that let you collect logs from your operating system, from cloud platforms including Amazon AWS, Google Cloud Platform, Microsoft Azure, and Heroku, and from Kubernetes, Docker, and APM.
- Manual implementation: If the previous options don't fit your requirements, you can always implement your own library to make a POST request to the New Relic Log API, as sketched after this list.
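For reference, a manual implementation is just an HTTP POST of a JSON payload with your license key in a request header. The following hedged sketch uses the requests library; the endpoint and header shown are the US-region values from the public Log API documentation, so verify them against the Log API reference for your account and region.

import os
import requests

# One log record; the Log API accepts a JSON array of objects with a "message" field.
payload = [{"message": "FlashDB is using 0.42% memory"}]

response = requests.post(
    "https://log-api.newrelic.com/log/v1",  # US-region Log API endpoint
    json=payload,
    headers={"Api-Key": os.environ["NEW_RELIC_LICENSE_KEY"]},
    timeout=10,
)
response.raise_for_status()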
In this procedure, you instrumented your service to send logs to New Relic. Next, you instrument it to send traces.
Tip
This procedure is part of a course that teaches you how to build a quickstart. Continue on to the next lesson: send traces from your product.